1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * The input core
4 *
5 * Copyright (c) 1999-2002 Vojtech Pavlik
6 */
7
8
9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
10
11 #include <linux/init.h>
12 #include <linux/types.h>
13 #include <linux/idr.h>
14 #include <linux/input/mt.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/random.h>
18 #include <linux/major.h>
19 #include <linux/proc_fs.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/pm.h>
23 #include <linux/poll.h>
24 #include <linux/device.h>
25 #include <linux/kstrtox.h>
26 #include <linux/mutex.h>
27 #include <linux/rcupdate.h>
28 #include "input-compat.h"
29 #include "input-core-private.h"
30 #include "input-poller.h"
31
32 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
33 MODULE_DESCRIPTION("Input core");
34 MODULE_LICENSE("GPL");
35
36 #define INPUT_MAX_CHAR_DEVICES 1024
37 #define INPUT_FIRST_DYNAMIC_DEV 256
38 static DEFINE_IDA(input_ida);
39
40 static LIST_HEAD(input_dev_list);
41 static LIST_HEAD(input_handler_list);
42
43 /*
44 * input_mutex protects access to both input_dev_list and input_handler_list.
45 * This also causes input_[un]register_device and input_[un]register_handler
46 * to be mutually exclusive, which simplifies locking in drivers implementing
47 * input handlers.
48 */
49 static DEFINE_MUTEX(input_mutex);
50
51 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
52
53 static const unsigned int input_max_code[EV_CNT] = {
54 [EV_KEY] = KEY_MAX,
55 [EV_REL] = REL_MAX,
56 [EV_ABS] = ABS_MAX,
57 [EV_MSC] = MSC_MAX,
58 [EV_SW] = SW_MAX,
59 [EV_LED] = LED_MAX,
60 [EV_SND] = SND_MAX,
61 [EV_FF] = FF_MAX,
62 };
63
64 static inline int is_event_supported(unsigned int code,
65 unsigned long *bm, unsigned int max)
66 {
67 return code <= max && test_bit(code, bm);
68 }
69
70 static int input_defuzz_abs_event(int value, int old_val, int fuzz)
71 {
72 if (fuzz) {
73 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
74 return old_val;
75
76 if (value > old_val - fuzz && value < old_val + fuzz)
77 return (old_val * 3 + value) / 4;
78
79 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
80 return (old_val + value) / 2;
81 }
82
83 return value;
84 }
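
/*
 * Worked example for input_defuzz_abs_event() above (illustrative only):
 * with fuzz = 8 and old_val = 100, a new value of 103 falls within
 * fuzz / 2 and is discarded (100 is kept), 106 falls within fuzz and is
 * smoothed to (100 * 3 + 106) / 4 = 101, 112 falls within fuzz * 2 and
 * is averaged to (100 + 112) / 2 = 106, and anything farther away is
 * passed through unchanged.
 */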
85
86 static void input_start_autorepeat(struct input_dev *dev, int code)
87 {
88 if (test_bit(EV_REP, dev->evbit) &&
89 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
90 dev->timer.function) {
91 dev->repeat_key = code;
92 mod_timer(&dev->timer,
93 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
94 }
95 }
96
97 static void input_stop_autorepeat(struct input_dev *dev)
98 {
99 timer_delete(&dev->timer);
100 }
101
102 /*
103 * Pass values first through all filters and then, if the event has not been
104 * filtered out, through all open handles. This order is achieved by placing
105 * filters at the head of the list of handles attached to the device, and
106 * placing regular handles at the tail of the list.
107 *
108 * This function is called with dev->event_lock held and interrupts disabled.
109 */
110 static void input_pass_values(struct input_dev *dev,
111 struct input_value *vals, unsigned int count)
112 {
113 struct input_handle *handle;
114 struct input_value *v;
115
116 lockdep_assert_held(&dev->event_lock);
117
118 scoped_guard(rcu) {
119 handle = rcu_dereference(dev->grab);
120 if (handle) {
121 count = handle->handle_events(handle, vals, count);
122 break;
123 }
124
125 list_for_each_entry_rcu(handle, &dev->h_list, d_node) {
126 if (handle->open) {
127 count = handle->handle_events(handle, vals,
128 count);
129 if (!count)
130 break;
131 }
132 }
133 }
134
135 /* trigger auto repeat for key events */
136 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
137 for (v = vals; v != vals + count; v++) {
138 if (v->type == EV_KEY && v->value != 2) {
139 if (v->value)
140 input_start_autorepeat(dev, v->code);
141 else
142 input_stop_autorepeat(dev);
143 }
144 }
145 }
146 }
147
148 #define INPUT_IGNORE_EVENT 0
149 #define INPUT_PASS_TO_HANDLERS 1
150 #define INPUT_PASS_TO_DEVICE 2
151 #define INPUT_SLOT 4
152 #define INPUT_FLUSH 8
153 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
154
155 static int input_handle_abs_event(struct input_dev *dev,
156 unsigned int code, int *pval)
157 {
158 struct input_mt *mt = dev->mt;
159 bool is_new_slot = false;
160 bool is_mt_event;
161 int *pold;
162
163 if (code == ABS_MT_SLOT) {
164 /*
165 * "Stage" the event; we'll flush it later, when we
166 * get actual touch data.
167 */
168 if (mt && *pval >= 0 && *pval < mt->num_slots)
169 mt->slot = *pval;
170
171 return INPUT_IGNORE_EVENT;
172 }
173
174 is_mt_event = input_is_mt_value(code);
175
176 if (!is_mt_event) {
177 pold = &dev->absinfo[code].value;
178 } else if (mt) {
179 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
180 is_new_slot = mt->slot != dev->absinfo[ABS_MT_SLOT].value;
181 } else {
182 /*
183 * Bypass filtering for multi-touch events when
184 * not employing slots.
185 */
186 pold = NULL;
187 }
188
189 if (pold) {
190 *pval = input_defuzz_abs_event(*pval, *pold,
191 dev->absinfo[code].fuzz);
192 if (*pold == *pval)
193 return INPUT_IGNORE_EVENT;
194
195 *pold = *pval;
196 }
197
198 /* Flush pending "slot" event */
199 if (is_new_slot) {
200 dev->absinfo[ABS_MT_SLOT].value = mt->slot;
201 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
202 }
203
204 return INPUT_PASS_TO_HANDLERS;
205 }
206
207 static int input_get_disposition(struct input_dev *dev,
208 unsigned int type, unsigned int code, int *pval)
209 {
210 int disposition = INPUT_IGNORE_EVENT;
211 int value = *pval;
212
213 /* filter-out events from inhibited devices */
214 if (dev->inhibited)
215 return INPUT_IGNORE_EVENT;
216
217 switch (type) {
218
219 case EV_SYN:
220 switch (code) {
221 case SYN_CONFIG:
222 disposition = INPUT_PASS_TO_ALL;
223 break;
224
225 case SYN_REPORT:
226 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
227 break;
228 case SYN_MT_REPORT:
229 disposition = INPUT_PASS_TO_HANDLERS;
230 break;
231 }
232 break;
233
234 case EV_KEY:
235 if (is_event_supported(code, dev->keybit, KEY_MAX)) {
236
237 /* auto-repeat bypasses state updates */
238 if (value == 2) {
239 disposition = INPUT_PASS_TO_HANDLERS;
240 break;
241 }
242
243 if (!!test_bit(code, dev->key) != !!value) {
244
245 __change_bit(code, dev->key);
246 disposition = INPUT_PASS_TO_HANDLERS;
247 }
248 }
249 break;
250
251 case EV_SW:
252 if (is_event_supported(code, dev->swbit, SW_MAX) &&
253 !!test_bit(code, dev->sw) != !!value) {
254
255 __change_bit(code, dev->sw);
256 disposition = INPUT_PASS_TO_HANDLERS;
257 }
258 break;
259
260 case EV_ABS:
261 if (is_event_supported(code, dev->absbit, ABS_MAX))
262 disposition = input_handle_abs_event(dev, code, &value);
263
264 break;
265
266 case EV_REL:
267 if (is_event_supported(code, dev->relbit, REL_MAX) && value)
268 disposition = INPUT_PASS_TO_HANDLERS;
269
270 break;
271
272 case EV_MSC:
273 if (is_event_supported(code, dev->mscbit, MSC_MAX))
274 disposition = INPUT_PASS_TO_ALL;
275
276 break;
277
278 case EV_LED:
279 if (is_event_supported(code, dev->ledbit, LED_MAX) &&
280 !!test_bit(code, dev->led) != !!value) {
281
282 __change_bit(code, dev->led);
283 disposition = INPUT_PASS_TO_ALL;
284 }
285 break;
286
287 case EV_SND:
288 if (is_event_supported(code, dev->sndbit, SND_MAX)) {
289
290 if (!!test_bit(code, dev->snd) != !!value)
291 __change_bit(code, dev->snd);
292 disposition = INPUT_PASS_TO_ALL;
293 }
294 break;
295
296 case EV_REP:
297 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
298 dev->rep[code] = value;
299 disposition = INPUT_PASS_TO_ALL;
300 }
301 break;
302
303 case EV_FF:
304 if (value >= 0)
305 disposition = INPUT_PASS_TO_ALL;
306 break;
307
308 case EV_PWR:
309 disposition = INPUT_PASS_TO_ALL;
310 break;
311 }
312
313 *pval = value;
314 return disposition;
315 }
316
317 static void input_event_dispose(struct input_dev *dev, int disposition,
318 unsigned int type, unsigned int code, int value)
319 {
320 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
321 dev->event(dev, type, code, value);
322
323 if (disposition & INPUT_PASS_TO_HANDLERS) {
324 struct input_value *v;
325
326 if (disposition & INPUT_SLOT) {
327 v = &dev->vals[dev->num_vals++];
328 v->type = EV_ABS;
329 v->code = ABS_MT_SLOT;
330 v->value = dev->mt->slot;
331 }
332
333 v = &dev->vals[dev->num_vals++];
334 v->type = type;
335 v->code = code;
336 v->value = value;
337 }
338
339 if (disposition & INPUT_FLUSH) {
340 if (dev->num_vals >= 2)
341 input_pass_values(dev, dev->vals, dev->num_vals);
342 dev->num_vals = 0;
343 /*
344 * Reset the timestamp on flush so we won't end up
345 * with a stale one. Note we only need to reset the
346 * monotonic one as we use its presence when deciding
347 * whether to generate a synthetic timestamp.
348 */
349 dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
350 } else if (dev->num_vals >= dev->max_vals - 2) {
351 dev->vals[dev->num_vals++] = input_value_sync;
352 input_pass_values(dev, dev->vals, dev->num_vals);
353 dev->num_vals = 0;
354 }
355 }
356
357 void input_handle_event(struct input_dev *dev,
358 unsigned int type, unsigned int code, int value)
359 {
360 int disposition;
361
362 lockdep_assert_held(&dev->event_lock);
363
364 disposition = input_get_disposition(dev, type, code, &value);
365 if (disposition != INPUT_IGNORE_EVENT) {
366 if (type != EV_SYN)
367 add_input_randomness(type, code, value);
368
369 input_event_dispose(dev, disposition, type, code, value);
370 }
371 }
372
373 /**
374 * input_event() - report new input event
375 * @dev: device that generated the event
376 * @type: type of the event
377 * @code: event code
378 * @value: value of the event
379 *
380 * This function should be used by drivers implementing various input
381 * devices to report input events. See also input_inject_event().
382 *
383 * NOTE: input_event() may be safely used right after input device was
384 * allocated with input_allocate_device(), even before it is registered
385 * with input_register_device(), but the event will not reach any of the
386 * input handlers. Such early invocation of input_event() may be used
387 * to 'seed' initial state of a switch or initial position of absolute
388 * axis, etc.
389 */
390 void input_event(struct input_dev *dev,
391 unsigned int type, unsigned int code, int value)
392 {
393 if (is_event_supported(type, dev->evbit, EV_MAX)) {
394 guard(spinlock_irqsave)(&dev->event_lock);
395 input_handle_event(dev, type, code, value);
396 }
397 }
398 EXPORT_SYMBOL(input_event);
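
/*
 * Minimal usage sketch (not part of the core; "example_dev" and the
 * interrupt handler below are hypothetical): drivers normally report
 * events through the input_report_*() wrappers around input_event()
 * and close each packet with input_sync():
 *
 *      static irqreturn_t example_irq(int irq, void *data)
 *      {
 *              struct input_dev *example_dev = data;
 *
 *              input_report_key(example_dev, KEY_POWER, 1);
 *              input_sync(example_dev);
 *              input_report_key(example_dev, KEY_POWER, 0);
 *              input_sync(example_dev);
 *
 *              return IRQ_HANDLED;
 *      }
 */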
399
400 /**
401 * input_inject_event() - send input event from input handler
402 * @handle: input handle to send event through
403 * @type: type of the event
404 * @code: event code
405 * @value: value of the event
406 *
407 * Similar to input_event() but will ignore event if device is
408 * "grabbed" and handle injecting event is not the one that owns
409 * the device.
410 */
411 void input_inject_event(struct input_handle *handle,
412 unsigned int type, unsigned int code, int value)
413 {
414 struct input_dev *dev = handle->dev;
415 struct input_handle *grab;
416
417 if (is_event_supported(type, dev->evbit, EV_MAX)) {
418 guard(spinlock_irqsave)(&dev->event_lock);
419 guard(rcu)();
420
421 grab = rcu_dereference(dev->grab);
422 if (!grab || grab == handle)
423 input_handle_event(dev, type, code, value);
424
425 }
426 }
427 EXPORT_SYMBOL(input_inject_event);
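
/*
 * Usage sketch (hypothetical handle): a handler that wants to drive a
 * keyboard LED could, for example, do
 *
 *      input_inject_event(handle, EV_LED, LED_CAPSL, 1);
 *      input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
 *
 * which is silently dropped if another handle has grabbed the device.
 */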
428
429 /**
430 * input_alloc_absinfo - allocates array of input_absinfo structs
431 * @dev: the input device emitting absolute events
432 *
433 * If the absinfo struct the caller asked for is already allocated, this
434 * function will not do anything.
435 */
436 void input_alloc_absinfo(struct input_dev *dev)
437 {
438 if (dev->absinfo)
439 return;
440
441 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
442 if (!dev->absinfo) {
443 dev_err(dev->dev.parent ?: &dev->dev,
444 "%s: unable to allocate memory\n", __func__);
445 /*
446 * We will handle this allocation failure in
447 * input_register_device() when we refuse to register input
448 * device with ABS bits but without absinfo.
449 */
450 }
451 }
452 EXPORT_SYMBOL(input_alloc_absinfo);
453
454 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
455 int min, int max, int fuzz, int flat)
456 {
457 struct input_absinfo *absinfo;
458
459 __set_bit(EV_ABS, dev->evbit);
460 __set_bit(axis, dev->absbit);
461
462 input_alloc_absinfo(dev);
463 if (!dev->absinfo)
464 return;
465
466 absinfo = &dev->absinfo[axis];
467 absinfo->minimum = min;
468 absinfo->maximum = max;
469 absinfo->fuzz = fuzz;
470 absinfo->flat = flat;
471 }
472 EXPORT_SYMBOL(input_set_abs_params);
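
/*
 * Usage sketch (hypothetical touchscreen driver "ts"): declaring a
 * 12-bit X/Y touch surface with a little defuzzing could look like
 *
 *      input_set_abs_params(ts->input, ABS_X, 0, 4095, 4, 0);
 *      input_set_abs_params(ts->input, ABS_Y, 0, 4095, 4, 0);
 *      input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
 *
 * Note that EV_ABS and the per-axis bits are set implicitly above.
 */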
473
474 /**
475 * input_copy_abs - Copy absinfo from one input_dev to another
476 * @dst: Destination input device to copy the abs settings to
477 * @dst_axis: ABS_* value selecting the destination axis
478 * @src: Source input device to copy the abs settings from
479 * @src_axis: ABS_* value selecting the source axis
480 *
481 * Set absinfo for the selected destination axis by copying it from
482 * the specified source input device's source axis.
483 * This is useful to, e.g., set up a pen/stylus input device for combined
484 * touchscreen/pen hardware where the pen uses the same coordinates as
485 * the touchscreen.
486 */
487 void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
488 const struct input_dev *src, unsigned int src_axis)
489 {
490 /* src must have EV_ABS and src_axis set */
491 if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
492 test_bit(src_axis, src->absbit))))
493 return;
494
495 /*
496 * input_alloc_absinfo() may have failed for the source. Our caller is
497 * expected to catch this when registering the input devices, which may
498 * happen after the input_copy_abs() call.
499 */
500 if (!src->absinfo)
501 return;
502
503 input_set_capability(dst, EV_ABS, dst_axis);
504 if (!dst->absinfo)
505 return;
506
507 dst->absinfo[dst_axis] = src->absinfo[src_axis];
508 }
509 EXPORT_SYMBOL(input_copy_abs);
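
/*
 * Usage sketch (hypothetical combined touchscreen/pen driver): once the
 * touchscreen axes are set up, the pen device can simply reuse them:
 *
 *      input_copy_abs(pen_input, ABS_X, ts_input, ABS_X);
 *      input_copy_abs(pen_input, ABS_Y, ts_input, ABS_Y);
 */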
510
511 /**
512 * input_grab_device - grabs device for exclusive use
513 * @handle: input handle that wants to own the device
514 *
515 * When a device is grabbed by an input handle all events generated by
516 * the device are delivered only to this handle. Also, events injected
517 * by other input handles are ignored while the device is grabbed.
518 */
519 int input_grab_device(struct input_handle *handle)
520 {
521 struct input_dev *dev = handle->dev;
522
523 scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
524 if (dev->grab)
525 return -EBUSY;
526
527 rcu_assign_pointer(dev->grab, handle);
528 }
529
530 return 0;
531 }
532 EXPORT_SYMBOL(input_grab_device);
533
534 static void __input_release_device(struct input_handle *handle)
535 {
536 struct input_dev *dev = handle->dev;
537 struct input_handle *grabber;
538
539 grabber = rcu_dereference_protected(dev->grab,
540 lockdep_is_held(&dev->mutex));
541 if (grabber == handle) {
542 rcu_assign_pointer(dev->grab, NULL);
543 /* Make sure input_pass_values() notices that grab is gone */
544 synchronize_rcu();
545
546 list_for_each_entry(handle, &dev->h_list, d_node)
547 if (handle->open && handle->handler->start)
548 handle->handler->start(handle);
549 }
550 }
551
552 /**
553 * input_release_device - release previously grabbed device
554 * @handle: input handle that owns the device
555 *
556 * Releases previously grabbed device so that other input handles can
557 * start receiving input events. Upon release, all handlers attached
558 * to the device have their start() method called so they have a chance
559 * to synchronize device state with the rest of the system.
560 */
561 void input_release_device(struct input_handle *handle)
562 {
563 struct input_dev *dev = handle->dev;
564
565 guard(mutex)(&dev->mutex);
566 __input_release_device(handle);
567 }
568 EXPORT_SYMBOL(input_release_device);
569
570 /**
571 * input_open_device - open input device
572 * @handle: handle through which device is being accessed
573 *
574 * This function should be called by input handlers when they
575 * want to start receiving events from a given input device.
576 */
577 int input_open_device(struct input_handle *handle)
578 {
579 struct input_dev *dev = handle->dev;
580 int error;
581
582 scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
583 if (dev->going_away)
584 return -ENODEV;
585
586 handle->open++;
587
588 if (handle->handler->passive_observer)
589 return 0;
590
591 if (dev->users++ || dev->inhibited) {
592 /*
593 * Device is already opened and/or inhibited,
594 * so we can exit immediately and report success.
595 */
596 return 0;
597 }
598
599 if (dev->open) {
600 error = dev->open(dev);
601 if (error) {
602 dev->users--;
603 handle->open--;
604 /*
605 * Make sure we are not delivering any more
606 * events through this handle.
607 */
608 synchronize_rcu();
609 return error;
610 }
611 }
612
613 if (dev->poller)
614 input_dev_poller_start(dev->poller);
615 }
616
617 return 0;
618 }
619 EXPORT_SYMBOL(input_open_device);
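
/*
 * Usage sketch (hypothetical handler): a handler that wants events
 * immediately, rather than on demand, typically registers its handle
 * and then opens the device, pairing this with input_close_device()
 * on teardown:
 *
 *      error = input_register_handle(handle);
 *      if (error)
 *              goto err_free_handle;
 *
 *      error = input_open_device(handle);
 *      if (error)
 *              goto err_unregister_handle;
 */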
620
621 int input_flush_device(struct input_handle *handle, struct file *file)
622 {
623 struct input_dev *dev = handle->dev;
624
625 scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
626 if (dev->flush)
627 return dev->flush(dev, file);
628 }
629
630 return 0;
631 }
632 EXPORT_SYMBOL(input_flush_device);
633
634 /**
635 * input_close_device - close input device
636 * @handle: handle through which device is being accessed
637 *
638 * This function should be called by input handlers when they
639 * want to stop receiving events from a given input device.
640 */
641 void input_close_device(struct input_handle *handle)
642 {
643 struct input_dev *dev = handle->dev;
644
645 guard(mutex)(&dev->mutex);
646
647 __input_release_device(handle);
648
649 if (!handle->handler->passive_observer) {
650 if (!--dev->users && !dev->inhibited) {
651 if (dev->poller)
652 input_dev_poller_stop(dev->poller);
653 if (dev->close)
654 dev->close(dev);
655 }
656 }
657
658 if (!--handle->open) {
659 /*
660 * synchronize_rcu() makes sure that input_pass_values()
661 * completed and that no more input events are delivered
662 * through this handle
663 */
664 synchronize_rcu();
665 }
666 }
667 EXPORT_SYMBOL(input_close_device);
668
669 /*
670 * Simulate keyup events for all keys that are marked as pressed.
671 * The function must be called with dev->event_lock held.
672 */
673 static bool input_dev_release_keys(struct input_dev *dev)
674 {
675 bool need_sync = false;
676 int code;
677
678 lockdep_assert_held(&dev->event_lock);
679
680 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
681 for_each_set_bit(code, dev->key, KEY_CNT) {
682 input_handle_event(dev, EV_KEY, code, 0);
683 need_sync = true;
684 }
685 }
686
687 return need_sync;
688 }
689
690 /*
691 * Prepare device for unregistering
692 */
693 static void input_disconnect_device(struct input_dev *dev)
694 {
695 struct input_handle *handle;
696
697 /*
698 * Mark device as going away. Note that we take dev->mutex here
699 * not to protect access to dev->going_away but rather to ensure
700 * that there are no threads in the middle of input_open_device()
701 */
702 scoped_guard(mutex, &dev->mutex)
703 dev->going_away = true;
704
705 guard(spinlock_irq)(&dev->event_lock);
706
707 /*
708 * Simulate keyup events for all pressed keys so that handlers
709 * are not left with "stuck" keys. The driver may continue to
710 * generate events even after we are done here, but they will not
711 * reach any handlers.
712 */
713 if (input_dev_release_keys(dev))
714 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
715
716 list_for_each_entry(handle, &dev->h_list, d_node)
717 handle->open = 0;
718 }
719
720 /**
721 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
722 * @ke: keymap entry containing scancode to be converted.
723 * @scancode: pointer to the location where converted scancode should
724 * be stored.
725 *
726 * This function is used to convert a scancode stored in &struct input_keymap_entry
727 * into scalar form understood by legacy keymap handling methods. These
728 * methods expect scancodes to be represented as 'unsigned int'.
729 */
730 int input_scancode_to_scalar(const struct input_keymap_entry *ke,
731 unsigned int *scancode)
732 {
733 switch (ke->len) {
734 case 1:
735 *scancode = *((u8 *)ke->scancode);
736 break;
737
738 case 2:
739 *scancode = *((u16 *)ke->scancode);
740 break;
741
742 case 4:
743 *scancode = *((u32 *)ke->scancode);
744 break;
745
746 default:
747 return -EINVAL;
748 }
749
750 return 0;
751 }
752 EXPORT_SYMBOL(input_scancode_to_scalar);
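
/*
 * Example (illustrative only): a keymap entry carrying a 16-bit
 * scancode of 0x1e has ke->len == 2 with the value stored in native
 * endianness in ke->scancode; input_scancode_to_scalar() then returns
 * 0 and stores 0x1e in *scancode. Any length other than 1, 2 or 4
 * bytes is rejected with -EINVAL.
 */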
753
754 /*
755 * Those routines handle the default case where no [gs]etkeycode() is
756 * defined. In this case, an array indexed by the scancode is used.
757 */
758
759 static unsigned int input_fetch_keycode(struct input_dev *dev,
760 unsigned int index)
761 {
762 switch (dev->keycodesize) {
763 case 1:
764 return ((u8 *)dev->keycode)[index];
765
766 case 2:
767 return ((u16 *)dev->keycode)[index];
768
769 default:
770 return ((u32 *)dev->keycode)[index];
771 }
772 }
773
774 static int input_default_getkeycode(struct input_dev *dev,
775 struct input_keymap_entry *ke)
776 {
777 unsigned int index;
778 int error;
779
780 if (!dev->keycodesize)
781 return -EINVAL;
782
783 if (ke->flags & INPUT_KEYMAP_BY_INDEX)
784 index = ke->index;
785 else {
786 error = input_scancode_to_scalar(ke, &index);
787 if (error)
788 return error;
789 }
790
791 if (index >= dev->keycodemax)
792 return -EINVAL;
793
794 ke->keycode = input_fetch_keycode(dev, index);
795 ke->index = index;
796 ke->len = sizeof(index);
797 memcpy(ke->scancode, &index, sizeof(index));
798
799 return 0;
800 }
801
802 static int input_default_setkeycode(struct input_dev *dev,
803 const struct input_keymap_entry *ke,
804 unsigned int *old_keycode)
805 {
806 unsigned int index;
807 int error;
808 int i;
809
810 if (!dev->keycodesize)
811 return -EINVAL;
812
813 if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
814 index = ke->index;
815 } else {
816 error = input_scancode_to_scalar(ke, &index);
817 if (error)
818 return error;
819 }
820
821 if (index >= dev->keycodemax)
822 return -EINVAL;
823
824 if (dev->keycodesize < sizeof(ke->keycode) &&
825 (ke->keycode >> (dev->keycodesize * 8)))
826 return -EINVAL;
827
828 switch (dev->keycodesize) {
829 case 1: {
830 u8 *k = (u8 *)dev->keycode;
831 *old_keycode = k[index];
832 k[index] = ke->keycode;
833 break;
834 }
835 case 2: {
836 u16 *k = (u16 *)dev->keycode;
837 *old_keycode = k[index];
838 k[index] = ke->keycode;
839 break;
840 }
841 default: {
842 u32 *k = (u32 *)dev->keycode;
843 *old_keycode = k[index];
844 k[index] = ke->keycode;
845 break;
846 }
847 }
848
849 if (*old_keycode <= KEY_MAX) {
850 __clear_bit(*old_keycode, dev->keybit);
851 for (i = 0; i < dev->keycodemax; i++) {
852 if (input_fetch_keycode(dev, i) == *old_keycode) {
853 __set_bit(*old_keycode, dev->keybit);
854 /* Setting the bit twice is useless, so break */
855 break;
856 }
857 }
858 }
859
860 __set_bit(ke->keycode, dev->keybit);
861 return 0;
862 }
863
864 /**
865 * input_get_keycode - retrieve keycode currently mapped to a given scancode
866 * @dev: input device whose keymap is being queried
867 * @ke: keymap entry
868 *
869 * This function should be called by anyone interested in retrieving the
870 * current keymap. Presently evdev handlers use it.
871 */
872 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
873 {
874 guard(spinlock_irqsave)(&dev->event_lock);
875
876 return dev->getkeycode(dev, ke);
877 }
878 EXPORT_SYMBOL(input_get_keycode);
879
880 /**
881 * input_set_keycode - attribute a keycode to a given scancode
882 * @dev: input device whose keymap is being updated
883 * @ke: new keymap entry
884 *
885 * This function should be called by anyone needing to update the current
886 * keymap. Presently keyboard and evdev handlers use it.
887 */
888 int input_set_keycode(struct input_dev *dev,
889 const struct input_keymap_entry *ke)
890 {
891 unsigned int old_keycode;
892 int error;
893
894 if (ke->keycode > KEY_MAX)
895 return -EINVAL;
896
897 guard(spinlock_irqsave)(&dev->event_lock);
898
899 error = dev->setkeycode(dev, ke, &old_keycode);
900 if (error)
901 return error;
902
903 /* Make sure KEY_RESERVED did not get enabled. */
904 __clear_bit(KEY_RESERVED, dev->keybit);
905
906 /*
907 * Simulate keyup event if keycode is not present
908 * in the keymap anymore
909 */
910 if (old_keycode > KEY_MAX) {
911 dev_warn(dev->dev.parent ?: &dev->dev,
912 "%s: got too big old keycode %#x\n",
913 __func__, old_keycode);
914 } else if (test_bit(EV_KEY, dev->evbit) &&
915 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
916 __test_and_clear_bit(old_keycode, dev->key)) {
917 /*
918 * We have to use input_event_dispose() here directly instead
919 * of input_handle_event() because the key we want to release
920 * here is considered no longer supported by the device and
921 * input_handle_event() will ignore it.
922 */
923 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
924 EV_KEY, old_keycode, 0);
925 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
926 EV_SYN, SYN_REPORT, 1);
927 }
928
929 return 0;
930 }
931 EXPORT_SYMBOL(input_set_keycode);
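
/*
 * Usage sketch (hypothetical remapping, e.g. driven from an evdev ioctl
 * path): mapping the one-byte scancode 0x1e to KEY_PLAYPAUSE could look
 * like
 *
 *      struct input_keymap_entry ke = {
 *              .keycode = KEY_PLAYPAUSE,
 *              .len = 1,
 *              .scancode = { 0x1e },
 *      };
 *
 *      error = input_set_keycode(dev, &ke);
 */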
932
933 bool input_match_device_id(const struct input_dev *dev,
934 const struct input_device_id *id)
935 {
936 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
937 if (id->bustype != dev->id.bustype)
938 return false;
939
940 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
941 if (id->vendor != dev->id.vendor)
942 return false;
943
944 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
945 if (id->product != dev->id.product)
946 return false;
947
948 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
949 if (id->version != dev->id.version)
950 return false;
951
952 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
953 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
954 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
955 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
956 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
957 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
958 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
959 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
960 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
961 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
962 return false;
963 }
964
965 return true;
966 }
967 EXPORT_SYMBOL(input_match_device_id);
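
/*
 * Usage sketch (hypothetical handler): handlers describe the devices
 * they care about with a table of struct input_device_id entries that
 * is matched via input_match_device_id(). A handler interested only in
 * devices generating key events might use:
 *
 *      static const struct input_device_id example_ids[] = {
 *              {
 *                      .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
 *                      .evbit = { BIT_MASK(EV_KEY) },
 *              },
 *              { },
 *      };
 *
 * where the empty entry terminates the table.
 */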
968
969 static const struct input_device_id *input_match_device(struct input_handler *handler,
970 struct input_dev *dev)
971 {
972 const struct input_device_id *id;
973
974 for (id = handler->id_table; id->flags || id->driver_info; id++) {
975 if (input_match_device_id(dev, id) &&
976 (!handler->match || handler->match(handler, dev))) {
977 return id;
978 }
979 }
980
981 return NULL;
982 }
983
984 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
985 {
986 const struct input_device_id *id;
987 int error;
988
989 id = input_match_device(handler, dev);
990 if (!id)
991 return -ENODEV;
992
993 error = handler->connect(handler, dev, id);
994 if (error && error != -ENODEV)
995 pr_err("failed to attach handler %s to device %s, error: %d\n",
996 handler->name, kobject_name(&dev->dev.kobj), error);
997
998 return error;
999 }
1000
1001 #ifdef CONFIG_COMPAT
1002
1003 static int input_bits_to_string(char *buf, int buf_size,
1004 unsigned long bits, bool skip_empty)
1005 {
1006 int len = 0;
1007
1008 if (in_compat_syscall()) {
1009 u32 dword = bits >> 32;
1010 if (dword || !skip_empty)
1011 len += snprintf(buf, buf_size, "%x ", dword);
1012
1013 dword = bits & 0xffffffffUL;
1014 if (dword || !skip_empty || len)
1015 len += snprintf(buf + len, max(buf_size - len, 0),
1016 "%x", dword);
1017 } else {
1018 if (bits || !skip_empty)
1019 len += snprintf(buf, buf_size, "%lx", bits);
1020 }
1021
1022 return len;
1023 }
1024
1025 #else /* !CONFIG_COMPAT */
1026
1027 static int input_bits_to_string(char *buf, int buf_size,
1028 unsigned long bits, bool skip_empty)
1029 {
1030 return bits || !skip_empty ?
1031 snprintf(buf, buf_size, "%lx", bits) : 0;
1032 }
1033
1034 #endif
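
/*
 * Example (illustrative only): on a 64-bit kernel the bitmap word
 * 0x300000001 is emitted as "300000001" for native readers, while a
 * compat (32-bit) reader sees it split into two 32-bit halves, "3 1",
 * matching the layout a native 32-bit kernel would produce.
 */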
1035
1036 #ifdef CONFIG_PROC_FS
1037
1038 static struct proc_dir_entry *proc_bus_input_dir;
1039 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1040 static int input_devices_state;
1041
1042 static inline void input_wakeup_procfs_readers(void)
1043 {
1044 input_devices_state++;
1045 wake_up(&input_devices_poll_wait);
1046 }
1047
1048 struct input_seq_state {
1049 unsigned short pos;
1050 bool mutex_acquired;
1051 int input_devices_state;
1052 };
1053
1054 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
1055 {
1056 struct seq_file *seq = file->private_data;
1057 struct input_seq_state *state = seq->private;
1058
1059 poll_wait(file, &input_devices_poll_wait, wait);
1060 if (state->input_devices_state != input_devices_state) {
1061 state->input_devices_state = input_devices_state;
1062 return EPOLLIN | EPOLLRDNORM;
1063 }
1064
1065 return 0;
1066 }
1067
1068 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1069 {
1070 struct input_seq_state *state = seq->private;
1071 int error;
1072
1073 error = mutex_lock_interruptible(&input_mutex);
1074 if (error) {
1075 state->mutex_acquired = false;
1076 return ERR_PTR(error);
1077 }
1078
1079 state->mutex_acquired = true;
1080
1081 return seq_list_start(&input_dev_list, *pos);
1082 }
1083
1084 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1085 {
1086 return seq_list_next(v, &input_dev_list, pos);
1087 }
1088
1089 static void input_seq_stop(struct seq_file *seq, void *v)
1090 {
1091 struct input_seq_state *state = seq->private;
1092
1093 if (state->mutex_acquired)
1094 mutex_unlock(&input_mutex);
1095 }
1096
1097 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1098 unsigned long *bitmap, int max)
1099 {
1100 int i;
1101 bool skip_empty = true;
1102 char buf[18];
1103
1104 seq_printf(seq, "B: %s=", name);
1105
1106 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1107 if (input_bits_to_string(buf, sizeof(buf),
1108 bitmap[i], skip_empty)) {
1109 skip_empty = false;
1110 seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1111 }
1112 }
1113
1114 /*
1115 * If no output was produced print a single 0.
1116 */
1117 if (skip_empty)
1118 seq_putc(seq, '0');
1119
1120 seq_putc(seq, '\n');
1121 }
1122
1123 static int input_devices_seq_show(struct seq_file *seq, void *v)
1124 {
1125 struct input_dev *dev = container_of(v, struct input_dev, node);
1126 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1127 struct input_handle *handle;
1128
1129 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1130 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1131
1132 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1133 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1134 seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1135 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1136 seq_puts(seq, "H: Handlers=");
1137
1138 list_for_each_entry(handle, &dev->h_list, d_node)
1139 seq_printf(seq, "%s ", handle->name);
1140 seq_putc(seq, '\n');
1141
1142 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1143
1144 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1145 if (test_bit(EV_KEY, dev->evbit))
1146 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1147 if (test_bit(EV_REL, dev->evbit))
1148 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1149 if (test_bit(EV_ABS, dev->evbit))
1150 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1151 if (test_bit(EV_MSC, dev->evbit))
1152 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1153 if (test_bit(EV_LED, dev->evbit))
1154 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1155 if (test_bit(EV_SND, dev->evbit))
1156 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1157 if (test_bit(EV_FF, dev->evbit))
1158 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1159 if (test_bit(EV_SW, dev->evbit))
1160 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1161
1162 seq_putc(seq, '\n');
1163
1164 kfree(path);
1165 return 0;
1166 }
1167
1168 static const struct seq_operations input_devices_seq_ops = {
1169 .start = input_devices_seq_start,
1170 .next = input_devices_seq_next,
1171 .stop = input_seq_stop,
1172 .show = input_devices_seq_show,
1173 };
1174
1175 static int input_proc_devices_open(struct inode *inode, struct file *file)
1176 {
1177 return seq_open_private(file, &input_devices_seq_ops,
1178 sizeof(struct input_seq_state));
1179 }
1180
1181 static const struct proc_ops input_devices_proc_ops = {
1182 .proc_open = input_proc_devices_open,
1183 .proc_poll = input_proc_devices_poll,
1184 .proc_read = seq_read,
1185 .proc_lseek = seq_lseek,
1186 .proc_release = seq_release_private,
1187 };
1188
1189 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1190 {
1191 struct input_seq_state *state = seq->private;
1192 int error;
1193
1194 error = mutex_lock_interruptible(&input_mutex);
1195 if (error) {
1196 state->mutex_acquired = false;
1197 return ERR_PTR(error);
1198 }
1199
1200 state->mutex_acquired = true;
1201 state->pos = *pos;
1202
1203 return seq_list_start(&input_handler_list, *pos);
1204 }
1205
1206 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1207 {
1208 struct input_seq_state *state = seq->private;
1209
1210 state->pos = *pos + 1;
1211 return seq_list_next(v, &input_handler_list, pos);
1212 }
1213
1214 static int input_handlers_seq_show(struct seq_file *seq, void *v)
1215 {
1216 struct input_handler *handler = container_of(v, struct input_handler, node);
1217 struct input_seq_state *state = seq->private;
1218
1219 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1220 if (handler->filter)
1221 seq_puts(seq, " (filter)");
1222 if (handler->legacy_minors)
1223 seq_printf(seq, " Minor=%d", handler->minor);
1224 seq_putc(seq, '\n');
1225
1226 return 0;
1227 }
1228
1229 static const struct seq_operations input_handlers_seq_ops = {
1230 .start = input_handlers_seq_start,
1231 .next = input_handlers_seq_next,
1232 .stop = input_seq_stop,
1233 .show = input_handlers_seq_show,
1234 };
1235
1236 static int input_proc_handlers_open(struct inode *inode, struct file *file)
1237 {
1238 return seq_open_private(file, &input_handlers_seq_ops,
1239 sizeof(struct input_seq_state));
1240 }
1241
1242 static const struct proc_ops input_handlers_proc_ops = {
1243 .proc_open = input_proc_handlers_open,
1244 .proc_read = seq_read,
1245 .proc_lseek = seq_lseek,
1246 .proc_release = seq_release_private,
1247 };
1248
1249 static int __init input_proc_init(void)
1250 {
1251 struct proc_dir_entry *entry;
1252
1253 proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1254 if (!proc_bus_input_dir)
1255 return -ENOMEM;
1256
1257 entry = proc_create("devices", 0, proc_bus_input_dir,
1258 &input_devices_proc_ops);
1259 if (!entry)
1260 goto fail1;
1261
1262 entry = proc_create("handlers", 0, proc_bus_input_dir,
1263 &input_handlers_proc_ops);
1264 if (!entry)
1265 goto fail2;
1266
1267 return 0;
1268
1269 fail2: remove_proc_entry("devices", proc_bus_input_dir);
1270 fail1: remove_proc_entry("bus/input", NULL);
1271 return -ENOMEM;
1272 }
1273
1274 static void input_proc_exit(void)
1275 {
1276 remove_proc_entry("devices", proc_bus_input_dir);
1277 remove_proc_entry("handlers", proc_bus_input_dir);
1278 remove_proc_entry("bus/input", NULL);
1279 }
1280
1281 #else /* !CONFIG_PROC_FS */
1282 static inline void input_wakeup_procfs_readers(void) { }
1283 static inline int input_proc_init(void) { return 0; }
1284 static inline void input_proc_exit(void) { }
1285 #endif
1286
1287 #define INPUT_DEV_STRING_ATTR_SHOW(name) \
1288 static ssize_t input_dev_show_##name(struct device *dev, \
1289 struct device_attribute *attr, \
1290 char *buf) \
1291 { \
1292 struct input_dev *input_dev = to_input_dev(dev); \
1293 \
1294 return sysfs_emit(buf, "%s\n", \
1295 input_dev->name ? input_dev->name : ""); \
1296 } \
1297 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1298
1299 INPUT_DEV_STRING_ATTR_SHOW(name);
1300 INPUT_DEV_STRING_ATTR_SHOW(phys);
1301 INPUT_DEV_STRING_ATTR_SHOW(uniq);
1302
1303 static int input_print_modalias_bits(char *buf, int size,
1304 char name, const unsigned long *bm,
1305 unsigned int min_bit, unsigned int max_bit)
1306 {
1307 int bit = min_bit;
1308 int len = 0;
1309
1310 len += snprintf(buf, max(size, 0), "%c", name);
1311 for_each_set_bit_from(bit, bm, max_bit)
1312 len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
1313 return len;
1314 }
1315
1316 static int input_print_modalias_parts(char *buf, int size, int full_len,
1317 const struct input_dev *id)
1318 {
1319 int len, klen, remainder, space;
1320
1321 len = snprintf(buf, max(size, 0),
1322 "input:b%04Xv%04Xp%04Xe%04X-",
1323 id->id.bustype, id->id.vendor,
1324 id->id.product, id->id.version);
1325
1326 len += input_print_modalias_bits(buf + len, size - len,
1327 'e', id->evbit, 0, EV_MAX);
1328
1329 /*
1330 * Calculate the remaining space in the buffer making sure we
1331 * have place for the terminating 0.
1332 */
1333 space = max(size - (len + 1), 0);
1334
1335 klen = input_print_modalias_bits(buf + len, size - len,
1336 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1337 len += klen;
1338
1339 /*
1340 * If we have more data than we can fit in the buffer, check
1341 * if we can trim key data to fit in the rest. We will indicate
1342 * that key data is incomplete by adding "+" sign at the end, like
1343 * this: "k1,2,3,45,+,".
1344 *
1345 * Note that the shortest key info (if present) is "k+," so we
1346 * can only try to trim if key data is longer than that.
1347 */
1348 if (full_len && size < full_len + 1 && klen > 3) {
1349 remainder = full_len - len;
1350 /*
1351 * We can only trim if we have space for the remainder
1352 * and also for at least "k+," which is 3 more characters.
1353 */
1354 if (remainder <= space - 3) {
1355 /*
1356 * We are guaranteed to have 'k' in the buffer, so
1357 * we need at least 3 additional bytes for storing
1358 * "+," in addition to the remainder.
1359 */
1360 for (int i = size - 1 - remainder - 3; i >= 0; i--) {
1361 if (buf[i] == 'k' || buf[i] == ',') {
1362 strcpy(buf + i + 1, "+,");
1363 len = i + 3; /* Not counting '\0' */
1364 break;
1365 }
1366 }
1367 }
1368 }
1369
1370 len += input_print_modalias_bits(buf + len, size - len,
1371 'r', id->relbit, 0, REL_MAX);
1372 len += input_print_modalias_bits(buf + len, size - len,
1373 'a', id->absbit, 0, ABS_MAX);
1374 len += input_print_modalias_bits(buf + len, size - len,
1375 'm', id->mscbit, 0, MSC_MAX);
1376 len += input_print_modalias_bits(buf + len, size - len,
1377 'l', id->ledbit, 0, LED_MAX);
1378 len += input_print_modalias_bits(buf + len, size - len,
1379 's', id->sndbit, 0, SND_MAX);
1380 len += input_print_modalias_bits(buf + len, size - len,
1381 'f', id->ffbit, 0, FF_MAX);
1382 len += input_print_modalias_bits(buf + len, size - len,
1383 'w', id->swbit, 0, SW_MAX);
1384
1385 return len;
1386 }
1387
1388 static int input_print_modalias(char *buf, int size, const struct input_dev *id)
1389 {
1390 int full_len;
1391
1392 /*
1393 * Printing is done in 2 passes: first one figures out total length
1394 * needed for the modalias string, second one will try to trim key
1395 * data in case when buffer is too small for the entire modalias.
1396 * If the buffer is too small regardless, it will fill as much as it
1397 * can (without trimming key data) into the buffer and leave it to
1398 * the caller to figure out what to do with the result.
1399 */
1400 full_len = input_print_modalias_parts(NULL, 0, 0, id);
1401 return input_print_modalias_parts(buf, size, full_len, id);
1402 }
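
/*
 * The resulting string has the form
 *
 *      input:b<bus>v<vendor>p<product>e<version>-e<evbits>k<keybits>...w<swbits>
 *
 * with each capability section being its letter followed by the set bit
 * numbers in hex, each terminated by a comma (sections with no bits set
 * degenerate to just the letter).
 */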
1403
1404 static ssize_t input_dev_show_modalias(struct device *dev,
1405 struct device_attribute *attr,
1406 char *buf)
1407 {
1408 struct input_dev *id = to_input_dev(dev);
1409 ssize_t len;
1410
1411 len = input_print_modalias(buf, PAGE_SIZE, id);
1412 if (len < PAGE_SIZE - 2)
1413 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1414
1415 return min_t(int, len, PAGE_SIZE);
1416 }
1417 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1418
1419 static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
1420 int max, int add_cr);
1421
1422 static ssize_t input_dev_show_properties(struct device *dev,
1423 struct device_attribute *attr,
1424 char *buf)
1425 {
1426 struct input_dev *input_dev = to_input_dev(dev);
1427 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1428 INPUT_PROP_MAX, true);
1429 return min_t(int, len, PAGE_SIZE);
1430 }
1431 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1432
1433 static int input_inhibit_device(struct input_dev *dev);
1434 static int input_uninhibit_device(struct input_dev *dev);
1435
1436 static ssize_t inhibited_show(struct device *dev,
1437 struct device_attribute *attr,
1438 char *buf)
1439 {
1440 struct input_dev *input_dev = to_input_dev(dev);
1441
1442 return sysfs_emit(buf, "%d\n", input_dev->inhibited);
1443 }
1444
1445 static ssize_t inhibited_store(struct device *dev,
1446 struct device_attribute *attr, const char *buf,
1447 size_t len)
1448 {
1449 struct input_dev *input_dev = to_input_dev(dev);
1450 ssize_t rv;
1451 bool inhibited;
1452
1453 if (kstrtobool(buf, &inhibited))
1454 return -EINVAL;
1455
1456 if (inhibited)
1457 rv = input_inhibit_device(input_dev);
1458 else
1459 rv = input_uninhibit_device(input_dev);
1460
1461 if (rv != 0)
1462 return rv;
1463
1464 return len;
1465 }
1466
1467 static DEVICE_ATTR_RW(inhibited);
1468
1469 static struct attribute *input_dev_attrs[] = {
1470 &dev_attr_name.attr,
1471 &dev_attr_phys.attr,
1472 &dev_attr_uniq.attr,
1473 &dev_attr_modalias.attr,
1474 &dev_attr_properties.attr,
1475 &dev_attr_inhibited.attr,
1476 NULL
1477 };
1478
1479 static const struct attribute_group input_dev_attr_group = {
1480 .attrs = input_dev_attrs,
1481 };
1482
1483 #define INPUT_DEV_ID_ATTR(name) \
1484 static ssize_t input_dev_show_id_##name(struct device *dev, \
1485 struct device_attribute *attr, \
1486 char *buf) \
1487 { \
1488 struct input_dev *input_dev = to_input_dev(dev); \
1489 return sysfs_emit(buf, "%04x\n", input_dev->id.name); \
1490 } \
1491 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1492
1493 INPUT_DEV_ID_ATTR(bustype);
1494 INPUT_DEV_ID_ATTR(vendor);
1495 INPUT_DEV_ID_ATTR(product);
1496 INPUT_DEV_ID_ATTR(version);
1497
1498 static struct attribute *input_dev_id_attrs[] = {
1499 &dev_attr_bustype.attr,
1500 &dev_attr_vendor.attr,
1501 &dev_attr_product.attr,
1502 &dev_attr_version.attr,
1503 NULL
1504 };
1505
1506 static const struct attribute_group input_dev_id_attr_group = {
1507 .name = "id",
1508 .attrs = input_dev_id_attrs,
1509 };
1510
1511 static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
1512 int max, int add_cr)
1513 {
1514 int i;
1515 int len = 0;
1516 bool skip_empty = true;
1517
1518 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1519 len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1520 bitmap[i], skip_empty);
1521 if (len) {
1522 skip_empty = false;
1523 if (i > 0)
1524 len += snprintf(buf + len, max(buf_size - len, 0), " ");
1525 }
1526 }
1527
1528 /*
1529 * If no output was produced print a single 0.
1530 */
1531 if (len == 0)
1532 len = snprintf(buf, buf_size, "%d", 0);
1533
1534 if (add_cr)
1535 len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1536
1537 return len;
1538 }
1539
1540 #define INPUT_DEV_CAP_ATTR(ev, bm) \
1541 static ssize_t input_dev_show_cap_##bm(struct device *dev, \
1542 struct device_attribute *attr, \
1543 char *buf) \
1544 { \
1545 struct input_dev *input_dev = to_input_dev(dev); \
1546 int len = input_print_bitmap(buf, PAGE_SIZE, \
1547 input_dev->bm##bit, ev##_MAX, \
1548 true); \
1549 return min_t(int, len, PAGE_SIZE); \
1550 } \
1551 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1552
1553 INPUT_DEV_CAP_ATTR(EV, ev);
1554 INPUT_DEV_CAP_ATTR(KEY, key);
1555 INPUT_DEV_CAP_ATTR(REL, rel);
1556 INPUT_DEV_CAP_ATTR(ABS, abs);
1557 INPUT_DEV_CAP_ATTR(MSC, msc);
1558 INPUT_DEV_CAP_ATTR(LED, led);
1559 INPUT_DEV_CAP_ATTR(SND, snd);
1560 INPUT_DEV_CAP_ATTR(FF, ff);
1561 INPUT_DEV_CAP_ATTR(SW, sw);
1562
1563 static struct attribute *input_dev_caps_attrs[] = {
1564 &dev_attr_ev.attr,
1565 &dev_attr_key.attr,
1566 &dev_attr_rel.attr,
1567 &dev_attr_abs.attr,
1568 &dev_attr_msc.attr,
1569 &dev_attr_led.attr,
1570 &dev_attr_snd.attr,
1571 &dev_attr_ff.attr,
1572 &dev_attr_sw.attr,
1573 NULL
1574 };
1575
1576 static const struct attribute_group input_dev_caps_attr_group = {
1577 .name = "capabilities",
1578 .attrs = input_dev_caps_attrs,
1579 };
1580
1581 static const struct attribute_group *input_dev_attr_groups[] = {
1582 &input_dev_attr_group,
1583 &input_dev_id_attr_group,
1584 &input_dev_caps_attr_group,
1585 &input_poller_attribute_group,
1586 NULL
1587 };
1588
1589 static void input_dev_release(struct device *device)
1590 {
1591 struct input_dev *dev = to_input_dev(device);
1592
1593 input_ff_destroy(dev);
1594 input_mt_destroy_slots(dev);
1595 kfree(dev->poller);
1596 kfree(dev->absinfo);
1597 kfree(dev->vals);
1598 kfree(dev);
1599
1600 module_put(THIS_MODULE);
1601 }
1602
1603 /*
1604 * Input uevent interface - loading event handlers based on
1605 * device bitfields.
1606 */
1607 static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1608 const char *name, const unsigned long *bitmap, int max)
1609 {
1610 int len;
1611
1612 if (add_uevent_var(env, "%s", name))
1613 return -ENOMEM;
1614
1615 len = input_print_bitmap(&env->buf[env->buflen - 1],
1616 sizeof(env->buf) - env->buflen,
1617 bitmap, max, false);
1618 if (len >= (sizeof(env->buf) - env->buflen))
1619 return -ENOMEM;
1620
1621 env->buflen += len;
1622 return 0;
1623 }
1624
1625 /*
1626 * This is a pretty gross hack. When building uevent data the driver core
1627 * may try adding more environment variables to kobj_uevent_env without
1628 * telling us, so we have no idea how much of the buffer we can use to
1629 * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
1630 * reduce the amount of memory we will use for the modalias environment variable.
1631 *
1632 * The potential additions are:
1633 *
1634 * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
1635 * HOME=/ (6 bytes)
1636 * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
1637 *
1638 * 68 bytes total. Allow extra buffer - 96 bytes
1639 */
1640 #define UEVENT_ENV_EXTRA_LEN 96
1641
1642 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1643 const struct input_dev *dev)
1644 {
1645 int len;
1646
1647 if (add_uevent_var(env, "MODALIAS="))
1648 return -ENOMEM;
1649
1650 len = input_print_modalias(&env->buf[env->buflen - 1],
1651 (int)sizeof(env->buf) - env->buflen -
1652 UEVENT_ENV_EXTRA_LEN,
1653 dev);
1654 if (len >= ((int)sizeof(env->buf) - env->buflen -
1655 UEVENT_ENV_EXTRA_LEN))
1656 return -ENOMEM;
1657
1658 env->buflen += len;
1659 return 0;
1660 }
1661
1662 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
1663 do { \
1664 int err = add_uevent_var(env, fmt, val); \
1665 if (err) \
1666 return err; \
1667 } while (0)
1668
1669 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \
1670 do { \
1671 int err = input_add_uevent_bm_var(env, name, bm, max); \
1672 if (err) \
1673 return err; \
1674 } while (0)
1675
1676 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
1677 do { \
1678 int err = input_add_uevent_modalias_var(env, dev); \
1679 if (err) \
1680 return err; \
1681 } while (0)
1682
1683 static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
1684 {
1685 const struct input_dev *dev = to_input_dev(device);
1686
1687 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1688 dev->id.bustype, dev->id.vendor,
1689 dev->id.product, dev->id.version);
1690 if (dev->name)
1691 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1692 if (dev->phys)
1693 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1694 if (dev->uniq)
1695 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1696
1697 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1698
1699 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1700 if (test_bit(EV_KEY, dev->evbit))
1701 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1702 if (test_bit(EV_REL, dev->evbit))
1703 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1704 if (test_bit(EV_ABS, dev->evbit))
1705 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1706 if (test_bit(EV_MSC, dev->evbit))
1707 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1708 if (test_bit(EV_LED, dev->evbit))
1709 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1710 if (test_bit(EV_SND, dev->evbit))
1711 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1712 if (test_bit(EV_FF, dev->evbit))
1713 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1714 if (test_bit(EV_SW, dev->evbit))
1715 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1716
1717 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1718
1719 return 0;
1720 }
1721
1722 #define INPUT_DO_TOGGLE(dev, type, bits, on) \
1723 do { \
1724 int i; \
1725 bool active; \
1726 \
1727 if (!test_bit(EV_##type, dev->evbit)) \
1728 break; \
1729 \
1730 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \
1731 active = test_bit(i, dev->bits); \
1732 if (!active && !on) \
1733 continue; \
1734 \
1735 dev->event(dev, EV_##type, i, on ? active : 0); \
1736 } \
1737 } while (0)
1738
1739 static void input_dev_toggle(struct input_dev *dev, bool activate)
1740 {
1741 if (!dev->event)
1742 return;
1743
1744 INPUT_DO_TOGGLE(dev, LED, led, activate);
1745 INPUT_DO_TOGGLE(dev, SND, snd, activate);
1746
1747 if (activate && test_bit(EV_REP, dev->evbit)) {
1748 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1749 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1750 }
1751 }
1752
1753 /**
1754 * input_reset_device() - reset/restore the state of input device
1755 * @dev: input device whose state needs to be reset
1756 *
1757 * This function tries to reset the state of an opened input device and
1758 * bring its internal state and the state of the hardware in sync with each other.
1759 * We mark all keys as released, restore LED state, repeat rate, etc.
1760 */
1761 void input_reset_device(struct input_dev *dev)
1762 {
1763 guard(mutex)(&dev->mutex);
1764 guard(spinlock_irqsave)(&dev->event_lock);
1765
1766 input_dev_toggle(dev, true);
1767 if (input_dev_release_keys(dev))
1768 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1769 }
1770 EXPORT_SYMBOL(input_reset_device);
1771
1772 static int input_inhibit_device(struct input_dev *dev)
1773 {
1774 guard(mutex)(&dev->mutex);
1775
1776 if (dev->inhibited)
1777 return 0;
1778
1779 if (dev->users) {
1780 if (dev->close)
1781 dev->close(dev);
1782 if (dev->poller)
1783 input_dev_poller_stop(dev->poller);
1784 }
1785
1786 scoped_guard(spinlock_irq, &dev->event_lock) {
1787 input_mt_release_slots(dev);
1788 input_dev_release_keys(dev);
1789 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1790 input_dev_toggle(dev, false);
1791 }
1792
1793 dev->inhibited = true;
1794
1795 return 0;
1796 }
1797
1798 static int input_uninhibit_device(struct input_dev *dev)
1799 {
1800 int error;
1801
1802 guard(mutex)(&dev->mutex);
1803
1804 if (!dev->inhibited)
1805 return 0;
1806
1807 if (dev->users) {
1808 if (dev->open) {
1809 error = dev->open(dev);
1810 if (error)
1811 return error;
1812 }
1813 if (dev->poller)
1814 input_dev_poller_start(dev->poller);
1815 }
1816
1817 dev->inhibited = false;
1818
1819 scoped_guard(spinlock_irq, &dev->event_lock)
1820 input_dev_toggle(dev, true);
1821
1822 return 0;
1823 }
1824
1825 static int input_dev_suspend(struct device *dev)
1826 {
1827 struct input_dev *input_dev = to_input_dev(dev);
1828
1829 guard(spinlock_irq)(&input_dev->event_lock);
1830
1831 /*
1832 * Keys that are pressed now are unlikely to be
1833 * still pressed when we resume.
1834 */
1835 if (input_dev_release_keys(input_dev))
1836 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1837
1838 /* Turn off LEDs and sounds, if any are active. */
1839 input_dev_toggle(input_dev, false);
1840
1841 return 0;
1842 }
1843
input_dev_resume(struct device * dev)1844 static int input_dev_resume(struct device *dev)
1845 {
1846 struct input_dev *input_dev = to_input_dev(dev);
1847
1848 guard(spinlock_irq)(&input_dev->event_lock);
1849
1850 /* Restore state of LEDs and sounds, if any were active. */
1851 input_dev_toggle(input_dev, true);
1852
1853 return 0;
1854 }
1855
input_dev_freeze(struct device * dev)1856 static int input_dev_freeze(struct device *dev)
1857 {
1858 struct input_dev *input_dev = to_input_dev(dev);
1859
1860 guard(spinlock_irq)(&input_dev->event_lock);
1861
1862 /*
1863 * Keys that are pressed now are unlikely to be
1864 * still pressed when we resume.
1865 */
1866 if (input_dev_release_keys(input_dev))
1867 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1868
1869 return 0;
1870 }
1871
input_dev_poweroff(struct device * dev)1872 static int input_dev_poweroff(struct device *dev)
1873 {
1874 struct input_dev *input_dev = to_input_dev(dev);
1875
1876 guard(spinlock_irq)(&input_dev->event_lock);
1877
1878 /* Turn off LEDs and sounds, if any are active. */
1879 input_dev_toggle(input_dev, false);
1880
1881 return 0;
1882 }
1883
1884 static const struct dev_pm_ops input_dev_pm_ops = {
1885 .suspend = input_dev_suspend,
1886 .resume = input_dev_resume,
1887 .freeze = input_dev_freeze,
1888 .poweroff = input_dev_poweroff,
1889 .restore = input_dev_resume,
1890 };
1891
1892 static const struct device_type input_dev_type = {
1893 .groups = input_dev_attr_groups,
1894 .release = input_dev_release,
1895 .uevent = input_dev_uevent,
1896 .pm = pm_sleep_ptr(&input_dev_pm_ops),
1897 };
1898
input_devnode(const struct device * dev,umode_t * mode)1899 static char *input_devnode(const struct device *dev, umode_t *mode)
1900 {
1901 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1902 }
1903
1904 const struct class input_class = {
1905 .name = "input",
1906 .devnode = input_devnode,
1907 };
1908 EXPORT_SYMBOL_GPL(input_class);
1909
1910 /**
1911 * input_allocate_device - allocate memory for new input device
1912 *
1913 * Returns prepared struct input_dev or %NULL.
1914 *
1915 * NOTE: Use input_free_device() to free devices that have not been
1916 * registered; input_unregister_device() should be used for already
1917 * registered devices.
1918 */
input_allocate_device(void)1919 struct input_dev *input_allocate_device(void)
1920 {
1921 static atomic_t input_no = ATOMIC_INIT(-1);
1922 struct input_dev *dev;
1923
1924 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1925 if (!dev)
1926 return NULL;
1927
1928 /*
1929 * Start with space for SYN_REPORT + 7 EV_KEY/EV_MSC events + 2 spare,
1930 * see input_estimate_events_per_packet(). We will tune the number
1931 * when we register the device.
1932 */
1933 dev->max_vals = 10;
1934 dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
1935 if (!dev->vals) {
1936 kfree(dev);
1937 return NULL;
1938 }
1939
1940 mutex_init(&dev->mutex);
1941 spin_lock_init(&dev->event_lock);
1942 timer_setup(&dev->timer, NULL, 0);
1943 INIT_LIST_HEAD(&dev->h_list);
1944 INIT_LIST_HEAD(&dev->node);
1945
1946 dev->dev.type = &input_dev_type;
1947 dev->dev.class = &input_class;
1948 device_initialize(&dev->dev);
1949 /*
1950 * From this point on we can no longer simply "kfree(dev)", we need
1951 * to use input_free_device() so that device core properly frees its
1952 * resources associated with the input device.
1953 */
1954
1955 dev_set_name(&dev->dev, "input%lu",
1956 (unsigned long)atomic_inc_return(&input_no));
1957
1958 __module_get(THIS_MODULE);
1959
1960 return dev;
1961 }
1962 EXPORT_SYMBOL(input_allocate_device);
1963
1964 struct input_devres {
1965 struct input_dev *input;
1966 };
1967
devm_input_device_match(struct device * dev,void * res,void * data)1968 static int devm_input_device_match(struct device *dev, void *res, void *data)
1969 {
1970 struct input_devres *devres = res;
1971
1972 return devres->input == data;
1973 }
1974
devm_input_device_release(struct device * dev,void * res)1975 static void devm_input_device_release(struct device *dev, void *res)
1976 {
1977 struct input_devres *devres = res;
1978 struct input_dev *input = devres->input;
1979
1980 dev_dbg(dev, "%s: dropping reference to %s\n",
1981 __func__, dev_name(&input->dev));
1982 input_put_device(input);
1983 }
1984
/**
 * devm_input_allocate_device - allocate managed input device
 * @dev: device owning the input device being created
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed; this is done automatically when the owner device unbinds from
 * its driver (or when binding fails). Once a managed input device is
 * allocated, it is ready to be set up and registered in the same fashion
 * as a regular input device. There are no special
 * devm_input_device_[un]register() variants; the regular ones work with
 * both managed and unmanaged devices, should you need them. In most
 * cases, however, a managed input device need not be explicitly
 * unregistered or freed.
 *
 * NOTE: the owner device is set up as the parent of the input device
 * and users should not override it.
 */
struct input_dev *devm_input_allocate_device(struct device *dev)
{
	struct input_dev *input;
	struct input_devres *devres;

	devres = devres_alloc(devm_input_device_release,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return NULL;

	input = input_allocate_device();
	if (!input) {
		devres_free(devres);
		return NULL;
	}

	input->dev.parent = dev;
	input->devres_managed = true;

	devres->input = input;
	devres_add(dev, devres);

	return input;
}
EXPORT_SYMBOL(devm_input_allocate_device);
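
/*
 * Usage sketch (illustrative, not part of the core): a minimal probe()
 * of a hypothetical button driver using the managed allocator. The
 * names my_button_probe and "my-button" are made up for illustration.
 *
 *	static int my_button_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "my-button";
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		return input_register_device(input);
 *	}
 *
 * No explicit input_unregister_device() or input_free_device() call is
 * needed in remove(); devres tears the device down when the driver
 * unbinds.
 */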

/**
 * input_free_device - free memory occupied by input_dev structure
 * @dev: input device to free
 *
 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once a device has been
 * registered, use input_unregister_device(); memory will be freed
 * once the last reference to the device is dropped.
 *
 * Device should be allocated by input_allocate_device().
 *
 * NOTE: If there are references to the input device then memory
 * will not be freed until the last reference is dropped.
 */
void input_free_device(struct input_dev *dev)
{
	if (dev) {
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->dev.parent,
					       devm_input_device_release,
					       devm_input_device_match,
					       dev));
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_free_device);

/**
 * input_set_timestamp - set timestamp for input events
 * @dev: input device to set timestamp for
 * @timestamp: the time at which the event has occurred
 *   in CLOCK_MONOTONIC
 *
 * This function is intended to provide to the input system a more
 * accurate time of when an event actually occurred. The driver should
 * call this function as soon as a timestamp is acquired, ensuring
 * clock conversions in input_set_timestamp are done correctly.
 *
 * The system entering suspend state between timestamp acquisition and
 * calling input_set_timestamp can result in inaccurate conversions.
 */
void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
{
	dev->timestamp[INPUT_CLK_MONO] = timestamp;
	dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
	dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
							   TK_OFFS_BOOT);
}
EXPORT_SYMBOL(input_set_timestamp);
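
/*
 * Usage sketch (illustrative, not part of the core): a hypothetical
 * interrupt handler that captures the timestamp as early as possible
 * and reports it together with the key state. my_button_isr,
 * my_hw_button_pressed() and the priv structure are made up.
 *
 *	static irqreturn_t my_button_isr(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		input_set_timestamp(priv->input, ktime_get());
 *		input_report_key(priv->input, KEY_POWER,
 *				 my_hw_button_pressed(priv));
 *		input_sync(priv->input);
 *
 *		return IRQ_HANDLED;
 *	}
 */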

/**
 * input_get_timestamp - get timestamp for input events
 * @dev: input device to get timestamp from
 *
 * A valid timestamp is a timestamp of non-zero value.
 */
ktime_t *input_get_timestamp(struct input_dev *dev)
{
	const ktime_t invalid_timestamp = ktime_set(0, 0);

	if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
		input_set_timestamp(dev, ktime_get());

	return dev->timestamp;
}
EXPORT_SYMBOL(input_get_timestamp);

/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting the corresponding bit in the appropriate
 * capability bitmap, the function also adjusts dev->evbit.
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	if (type < EV_CNT && input_max_code[type] &&
	    code > input_max_code[type]) {
		pr_err("%s: invalid code %u for type %u\n", __func__, code,
		       type);
		dump_stack();
		return;
	}

	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		input_alloc_absinfo(dev);
		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
		dump_stack();
		return;
	}

	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);
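
/*
 * Usage sketch (illustrative, not part of the core): declaring the
 * capabilities of a hypothetical lid-switch-plus-slider device before
 * registration. Absolute axes are usually set up via
 * input_set_abs_params(), which records the range and sets the
 * capability bits itself.
 *
 *	input_set_capability(input, EV_SW, SW_LID);
 *	input_set_capability(input, EV_KEY, KEY_SLEEP);
 *	input_set_abs_params(input, ABS_X, 0, 255, 0, 0);
 */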

static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
				sizeof(dev->bits##bit));		\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	scoped_guard(mutex, &input_mutex) {
		list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
			handle->handler->disconnect(handle);
		WARN_ON(!list_empty(&dev->h_list));

		timer_delete_sync(&dev->timer);
		list_del_init(&dev->node);

		input_wakeup_procfs_readers();
	}

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event(),
 * which may cause keys to get "stuck".
 */
static void input_repeat_key(struct timer_list *t)
{
	struct input_dev *dev = from_timer(dev, t, timer);

	guard(spinlock_irqsave)(&dev->event_lock);

	if (!dev->inhibited &&
	    test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {

		input_set_timestamp(dev, ktime_get());
		input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
					msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}
}

/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);
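
/*
 * Usage sketch (illustrative, not part of the core): a keyboard driver
 * that wants software autorepeat with non-default timing can call this
 * before registration; the 400 ms delay and 80 ms period below are
 * arbitrary example values.
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 400, 80);
 */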

bool input_device_enabled(struct input_dev *dev)
{
	lockdep_assert_held(&dev->mutex);

	return !dev->inhibited && dev->users > 0;
}
EXPORT_SYMBOL_GPL(input_device_enabled);
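
/*
 * Usage sketch (illustrative, not part of the core): callers must hold
 * dev->mutex, so a driver that only wants to restart scanning on resume
 * when the device is actually open and not inhibited might do something
 * like the following (my_hw_start_scan() is hypothetical):
 *
 *	mutex_lock(&input->mutex);
 *	if (input_device_enabled(input))
 *		my_hw_start_scan(priv);
 *	mutex_unlock(&input->mutex);
 */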

static int input_device_tune_vals(struct input_dev *dev)
{
	struct input_value *vals;
	unsigned int packet_size;
	unsigned int max_vals;

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	max_vals = dev->hint_events_per_packet + 2;
	if (dev->max_vals >= max_vals)
		return 0;

	vals = kcalloc(max_vals, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return -ENOMEM;

	scoped_guard(spinlock_irq, &dev->event_lock) {
		dev->max_vals = max_vals;
		swap(dev->vals, vals);
	}

	/* Because of swap() above, this frees the old vals memory */
	kfree(vals);

	return 0;
}

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers device with input core. The device must be
 * allocated with input_allocate_device() and all its capabilities
 * set up before registering.
 * If the function fails the device must be freed with input_free_device().
 * Once the device has been successfully registered it can be unregistered
 * with input_unregister_device(); input_free_device() should not be
 * called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed, their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * a registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of the managed input device will
 * happen later, when the devres stack is unwound to the point where the
 * device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	error = input_device_tune_vals(dev);
	if (error)
		goto err_devres_free;

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	if (dev->poller)
		input_dev_poller_finalize(dev->poller);

	error = device_add(&dev->dev);
	if (error)
		goto err_devres_free;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = -EINTR;
	scoped_cond_guard(mutex_intr, goto err_device_del, &input_mutex) {
		list_add_tail(&dev->node, &input_dev_list);

		list_for_each_entry(handler, &input_handler_list, node)
			input_attach_handler(dev, handler);

		input_wakeup_procfs_readers();
	}

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);
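
/*
 * Usage sketch (illustrative, not part of the core): the non-managed
 * allocate/register/unregister lifetime. Names are hypothetical; note
 * that input_free_device() is only used on the error path before the
 * device was registered, while teardown of a registered device goes
 * through input_unregister_device() in the remove path.
 *
 *	struct input_dev *input;
 *	int error;
 *
 *	input = input_allocate_device();
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "my-device";
 *	input_set_capability(input, EV_KEY, KEY_ENTER);
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);
 *		return error;
 *	}
 *
 *	...
 *	input_unregister_device(input);
 */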

/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once the device is
 * unregistered the caller should not try to access it as it may get
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
				       devm_input_device_unregister,
				       devm_input_device_match,
				       dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be
		 * done when the second devres entry fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

static int input_handler_check_methods(const struct input_handler *handler)
{
	int count = 0;

	if (handler->filter)
		count++;
	if (handler->events)
		count++;
	if (handler->event)
		count++;

	if (count > 1) {
		pr_err("%s: only one event processing method can be defined (%s)\n",
		       __func__, handler->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = input_handler_check_methods(handler);
	if (error)
		return error;

	scoped_cond_guard(mutex_intr, return -EINTR, &input_mutex) {
		INIT_LIST_HEAD(&handler->h_list);

		list_add_tail(&handler->node, &input_handler_list);

		list_for_each_entry(dev, &input_dev_list, node)
			input_attach_handler(dev, handler);

		input_wakeup_procfs_readers();
	}

	return 0;
}
EXPORT_SYMBOL(input_register_handler);
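
/*
 * Usage sketch (illustrative, not part of the core): the skeleton of a
 * hypothetical handler, in the style of drivers/input/evbug.c. Only one
 * of event()/events()/filter() may be set, as enforced by
 * input_handler_check_methods() above; my_connect()/my_disconnect()
 * would look like the examples further below.
 *
 *	static const struct input_device_id my_ids[] = {
 *		{ .driver_info = 1 },	(matches every input device)
 *		{ },			(terminating entry)
 *	};
 *
 *	static struct input_handler my_handler = {
 *		.event		= my_event,
 *		.connect	= my_connect,
 *		.disconnect	= my_disconnect,
 *		.name		= "my_handler",
 *		.id_table	= my_ids,
 *	};
 *
 *	error = input_register_handler(&my_handler);
 */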

/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from lists of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	guard(mutex)(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles, calling @fn for each and
 * passing it @data, and stop when @fn returns a non-zero value. The
 * function uses RCU to traverse the list and therefore may be used in
 * atomic contexts. The @fn callback is invoked from an RCU critical
 * section and thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval;

	guard(rcu)();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			return retval;
	}

	return 0;
}
EXPORT_SYMBOL(input_handler_for_each_handle);
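
/*
 * Usage sketch (illustrative, not part of the core): a handler-side
 * helper that visits every handle attached to the hypothetical
 * my_handler; returning a non-zero value from the callback stops the
 * iteration early. The callback runs under RCU and must not sleep.
 *
 *	static int my_match_handle(struct input_handle *handle, void *data)
 *	{
 *		return handle->dev == data;	(stop on the matching device)
 *	}
 *
 *	found = input_handler_for_each_handle(&my_handler, dev,
 *					      my_match_handle);
 */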

/*
 * An implementation of input_handle's handle_events() method that simply
 * invokes the handler->event() method for each event one by one.
 */
static unsigned int input_handle_events_default(struct input_handle *handle,
						struct input_value *vals,
						unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *v;

	for (v = vals; v != vals + count; v++)
		handler->event(handle, v->type, v->code, v->value);

	return count;
}

/*
 * An implementation of input_handle's handle_events() method that invokes
 * the handler->filter() method for each event one by one and removes events
 * that were filtered out from the "vals" array.
 */
static unsigned int input_handle_events_filter(struct input_handle *handle,
					       struct input_value *vals,
					       unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *end = vals;
	struct input_value *v;

	for (v = vals; v != vals + count; v++) {
		if (handler->filter(handle, v->type, v->code, v->value))
			continue;
		if (end != v)
			*end = *v;
		end++;
	}

	return end - vals;
}

/*
 * An implementation of input_handle's handle_events() method that does nothing.
 */
static unsigned int input_handle_events_null(struct input_handle *handle,
					     struct input_value *vals,
					     unsigned int count)
{
	return count;
}

/*
 * Sets up the appropriate handle->handle_events() method based on the
 * input_handler associated with the handle.
 */
static void input_handle_setup_event_handler(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;

	if (handler->filter)
		handle->handle_events = input_handle_events_filter;
	else if (handler->event)
		handle->handle_events = input_handle_events_default;
	else if (handler->events)
		handle->handle_events = handler->events;
	else
		handle->handle_events = input_handle_events_null;
}

/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto the device's and handler's
 * lists so that events can flow through it once it is opened using
 * input_open_device().
 *
 * This function is supposed to be called from the handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;

	input_handle_setup_event_handler(handle);
	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
		/*
		 * Filters go to the head of the list, normal handlers
		 * to the tail.
		 */
		if (handler->filter)
			list_add_rcu(&handle->d_node, &dev->h_list);
		else
			list_add_tail_rcu(&handle->d_node, &dev->h_list);
	}

	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);
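
/*
 * Usage sketch (illustrative, not part of the core): a minimal connect()
 * method of a hypothetical handler, modelled on drivers/input/evbug.c.
 * It allocates a handle, registers it and opens the device so that
 * events start flowing.
 *
 *	static int my_connect(struct input_handler *handler,
 *			      struct input_dev *dev,
 *			      const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "my_handle";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	 err_unregister_handle:
 *		input_unregister_handle(handle);
 *	 err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 */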

/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes the input handle from the device's and
 * handler's lists.
 *
 * This function is supposed to be called from the handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	scoped_guard(mutex, &dev->mutex)
		list_del_rcu(&handle->d_node);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);
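
/*
 * Usage sketch (illustrative, not part of the core): the matching
 * disconnect() method for the hypothetical handler above, undoing
 * my_connect() in reverse order.
 *
 *	static void my_disconnect(struct input_handle *handle)
 *	{
 *		input_close_device(handle);
 *		input_unregister_handle(handle);
 *		kfree(handle);
 *	}
 */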

/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of legacy range
 * @allow_dynamic: whether we can also take ID from the dynamic range
 *
 * This function allocates a new device minor from the input major
 * namespace. Caller can request a legacy minor by specifying @legacy_base
 * and @legacy_num parameters and whether ID can be allocated from the
 * dynamic range if there are no free IDs in the legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_alloc_range(&input_ida, legacy_base,
					    legacy_base + legacy_num - 1,
					    GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_alloc_range(&input_ida, INPUT_FIRST_DYNAMIC_DEV,
			       INPUT_MAX_CHAR_DEVICES - 1, GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);
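
/*
 * Usage sketch (illustrative, not part of the core): a handler's
 * connect() method reserving a minor, preferring a hypothetical legacy
 * window of 32 minors starting at MY_MINOR_BASE and falling back to the
 * dynamic range; the minor is returned with input_free_minor() on the
 * disconnect path.
 *
 *	minor = input_get_new_minor(MY_MINOR_BASE, 32, true);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	input_free_minor(minor);
 */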

/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases a previously allocated input minor so that
 * it can be reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_free(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d\n", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);