xref: /linux/drivers/input/input.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
1 /*
2  * The input core
3  *
4  * Copyright (c) 1999-2002 Vojtech Pavlik
5  */
6 
7 /*
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License version 2 as published by
10  * the Free Software Foundation.
11  */
12 
13 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
14 
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/idr.h>
18 #include <linux/input/mt.h>
19 #include <linux/module.h>
20 #include <linux/slab.h>
21 #include <linux/random.h>
22 #include <linux/major.h>
23 #include <linux/proc_fs.h>
24 #include <linux/sched.h>
25 #include <linux/seq_file.h>
26 #include <linux/poll.h>
27 #include <linux/device.h>
28 #include <linux/mutex.h>
29 #include <linux/rcupdate.h>
30 #include "input-compat.h"
31 
32 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
33 MODULE_DESCRIPTION("Input core");
34 MODULE_LICENSE("GPL");
35 
36 #define INPUT_MAX_CHAR_DEVICES		1024
37 #define INPUT_FIRST_DYNAMIC_DEV		256
38 static DEFINE_IDA(input_ida);
39 
40 static LIST_HEAD(input_dev_list);
41 static LIST_HEAD(input_handler_list);
42 
43 /*
44  * input_mutex protects access to both input_dev_list and input_handler_list.
45  * This also causes input_[un]register_device and input_[un]register_handler
46  * to be mutually exclusive, which simplifies locking in drivers implementing
47  * input handlers.
48  */
49 static DEFINE_MUTEX(input_mutex);
50 
51 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
52 
53 static inline int is_event_supported(unsigned int code,
54 				     unsigned long *bm, unsigned int max)
55 {
56 	return code <= max && test_bit(code, bm);
57 }
58 
59 static int input_defuzz_abs_event(int value, int old_val, int fuzz)
60 {
61 	if (fuzz) {
62 		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
63 			return old_val;
64 
65 		if (value > old_val - fuzz && value < old_val + fuzz)
66 			return (old_val * 3 + value) / 4;
67 
68 		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
69 			return (old_val + value) / 2;
70 	}
71 
72 	return value;
73 }
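
/*
 * Editor's note: a worked example (values are hypothetical) of how the
 * defuzz filter above behaves with old_val = 100 and fuzz = 8:
 *
 *	value = 103  ->  within fuzz/2 of old_val, returns 100 (no change)
 *	value = 106  ->  within fuzz, returns (100 * 3 + 106) / 4 = 101
 *	value = 112  ->  within fuzz * 2, returns (100 + 112) / 2 = 106
 *	value = 130  ->  outside the fuzz window, returned unchanged
 */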
74 
75 static void input_start_autorepeat(struct input_dev *dev, int code)
76 {
77 	if (test_bit(EV_REP, dev->evbit) &&
78 	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
79 	    dev->timer.function) {
80 		dev->repeat_key = code;
81 		mod_timer(&dev->timer,
82 			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
83 	}
84 }
85 
86 static void input_stop_autorepeat(struct input_dev *dev)
87 {
88 	del_timer(&dev->timer);
89 }
90 
91 /*
92  * Pass values to a single handler: first through its filter (if any), then,
93  * for values that were not filtered out, to its events() or event() method.
94  * This function is called with dev->event_lock held and interrupts disabled.
95  */
96 static unsigned int input_to_handler(struct input_handle *handle,
97 			struct input_value *vals, unsigned int count)
98 {
99 	struct input_handler *handler = handle->handler;
100 	struct input_value *end = vals;
101 	struct input_value *v;
102 
103 	if (handler->filter) {
104 		for (v = vals; v != vals + count; v++) {
105 			if (handler->filter(handle, v->type, v->code, v->value))
106 				continue;
107 			if (end != v)
108 				*end = *v;
109 			end++;
110 		}
111 		count = end - vals;
112 	}
113 
114 	if (!count)
115 		return 0;
116 
117 	if (handler->events)
118 		handler->events(handle, vals, count);
119 	else if (handler->event)
120 		for (v = vals; v != vals + count; v++)
121 			handler->event(handle, v->type, v->code, v->value);
122 
123 	return count;
124 }
125 
126 /*
127  * Pass values first through all filters and then, if they have not been
128  * filtered out, through all open handles. This function is called with
129  * dev->event_lock held and interrupts disabled.
130  */
131 static void input_pass_values(struct input_dev *dev,
132 			      struct input_value *vals, unsigned int count)
133 {
134 	struct input_handle *handle;
135 	struct input_value *v;
136 
137 	if (!count)
138 		return;
139 
140 	rcu_read_lock();
141 
142 	handle = rcu_dereference(dev->grab);
143 	if (handle) {
144 		count = input_to_handler(handle, vals, count);
145 	} else {
146 		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
147 			if (handle->open) {
148 				count = input_to_handler(handle, vals, count);
149 				if (!count)
150 					break;
151 			}
152 	}
153 
154 	rcu_read_unlock();
155 
156 	/* trigger auto repeat for key events */
157 	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
158 		for (v = vals; v != vals + count; v++) {
159 			if (v->type == EV_KEY && v->value != 2) {
160 				if (v->value)
161 					input_start_autorepeat(dev, v->code);
162 				else
163 					input_stop_autorepeat(dev);
164 			}
165 		}
166 	}
167 }
168 
169 static void input_pass_event(struct input_dev *dev,
170 			     unsigned int type, unsigned int code, int value)
171 {
172 	struct input_value vals[] = { { type, code, value } };
173 
174 	input_pass_values(dev, vals, ARRAY_SIZE(vals));
175 }
176 
177 /*
178  * Generate a software autorepeat event. Note that we take
179  * dev->event_lock here to avoid racing with input_event(),
180  * which may otherwise cause keys to get "stuck".
181  */
182 static void input_repeat_key(struct timer_list *t)
183 {
184 	struct input_dev *dev = from_timer(dev, t, timer);
185 	unsigned long flags;
186 
187 	spin_lock_irqsave(&dev->event_lock, flags);
188 
189 	if (test_bit(dev->repeat_key, dev->key) &&
190 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
191 		struct input_value vals[] =  {
192 			{ EV_KEY, dev->repeat_key, 2 },
193 			input_value_sync
194 		};
195 
196 		input_pass_values(dev, vals, ARRAY_SIZE(vals));
197 
198 		if (dev->rep[REP_PERIOD])
199 			mod_timer(&dev->timer, jiffies +
200 					msecs_to_jiffies(dev->rep[REP_PERIOD]));
201 	}
202 
203 	spin_unlock_irqrestore(&dev->event_lock, flags);
204 }
205 
206 #define INPUT_IGNORE_EVENT	0
207 #define INPUT_PASS_TO_HANDLERS	1
208 #define INPUT_PASS_TO_DEVICE	2
209 #define INPUT_SLOT		4
210 #define INPUT_FLUSH		8
211 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
212 
213 static int input_handle_abs_event(struct input_dev *dev,
214 				  unsigned int code, int *pval)
215 {
216 	struct input_mt *mt = dev->mt;
217 	bool is_mt_event;
218 	int *pold;
219 
220 	if (code == ABS_MT_SLOT) {
221 		/*
222 		 * "Stage" the event; we'll flush it later, when we
223 		 * get actual touch data.
224 		 */
225 		if (mt && *pval >= 0 && *pval < mt->num_slots)
226 			mt->slot = *pval;
227 
228 		return INPUT_IGNORE_EVENT;
229 	}
230 
231 	is_mt_event = input_is_mt_value(code);
232 
233 	if (!is_mt_event) {
234 		pold = &dev->absinfo[code].value;
235 	} else if (mt) {
236 		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
237 	} else {
238 		/*
239 		 * Bypass filtering for multi-touch events when
240 		 * not employing slots.
241 		 */
242 		pold = NULL;
243 	}
244 
245 	if (pold) {
246 		*pval = input_defuzz_abs_event(*pval, *pold,
247 						dev->absinfo[code].fuzz);
248 		if (*pold == *pval)
249 			return INPUT_IGNORE_EVENT;
250 
251 		*pold = *pval;
252 	}
253 
254 	/* Flush pending "slot" event */
255 	if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
256 		input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
257 		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
258 	}
259 
260 	return INPUT_PASS_TO_HANDLERS;
261 }
262 
263 static int input_get_disposition(struct input_dev *dev,
264 			  unsigned int type, unsigned int code, int *pval)
265 {
266 	int disposition = INPUT_IGNORE_EVENT;
267 	int value = *pval;
268 
269 	switch (type) {
270 
271 	case EV_SYN:
272 		switch (code) {
273 		case SYN_CONFIG:
274 			disposition = INPUT_PASS_TO_ALL;
275 			break;
276 
277 		case SYN_REPORT:
278 			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
279 			break;
280 		case SYN_MT_REPORT:
281 			disposition = INPUT_PASS_TO_HANDLERS;
282 			break;
283 		}
284 		break;
285 
286 	case EV_KEY:
287 		if (is_event_supported(code, dev->keybit, KEY_MAX)) {
288 
289 			/* auto-repeat bypasses state updates */
290 			if (value == 2) {
291 				disposition = INPUT_PASS_TO_HANDLERS;
292 				break;
293 			}
294 
295 			if (!!test_bit(code, dev->key) != !!value) {
296 
297 				__change_bit(code, dev->key);
298 				disposition = INPUT_PASS_TO_HANDLERS;
299 			}
300 		}
301 		break;
302 
303 	case EV_SW:
304 		if (is_event_supported(code, dev->swbit, SW_MAX) &&
305 		    !!test_bit(code, dev->sw) != !!value) {
306 
307 			__change_bit(code, dev->sw);
308 			disposition = INPUT_PASS_TO_HANDLERS;
309 		}
310 		break;
311 
312 	case EV_ABS:
313 		if (is_event_supported(code, dev->absbit, ABS_MAX))
314 			disposition = input_handle_abs_event(dev, code, &value);
315 
316 		break;
317 
318 	case EV_REL:
319 		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
320 			disposition = INPUT_PASS_TO_HANDLERS;
321 
322 		break;
323 
324 	case EV_MSC:
325 		if (is_event_supported(code, dev->mscbit, MSC_MAX))
326 			disposition = INPUT_PASS_TO_ALL;
327 
328 		break;
329 
330 	case EV_LED:
331 		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
332 		    !!test_bit(code, dev->led) != !!value) {
333 
334 			__change_bit(code, dev->led);
335 			disposition = INPUT_PASS_TO_ALL;
336 		}
337 		break;
338 
339 	case EV_SND:
340 		if (is_event_supported(code, dev->sndbit, SND_MAX)) {
341 
342 			if (!!test_bit(code, dev->snd) != !!value)
343 				__change_bit(code, dev->snd);
344 			disposition = INPUT_PASS_TO_ALL;
345 		}
346 		break;
347 
348 	case EV_REP:
349 		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
350 			dev->rep[code] = value;
351 			disposition = INPUT_PASS_TO_ALL;
352 		}
353 		break;
354 
355 	case EV_FF:
356 		if (value >= 0)
357 			disposition = INPUT_PASS_TO_ALL;
358 		break;
359 
360 	case EV_PWR:
361 		disposition = INPUT_PASS_TO_ALL;
362 		break;
363 	}
364 
365 	*pval = value;
366 	return disposition;
367 }
368 
369 static void input_handle_event(struct input_dev *dev,
370 			       unsigned int type, unsigned int code, int value)
371 {
372 	int disposition = input_get_disposition(dev, type, code, &value);
373 
374 	if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
375 		add_input_randomness(type, code, value);
376 
377 	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
378 		dev->event(dev, type, code, value);
379 
380 	if (!dev->vals)
381 		return;
382 
383 	if (disposition & INPUT_PASS_TO_HANDLERS) {
384 		struct input_value *v;
385 
386 		if (disposition & INPUT_SLOT) {
387 			v = &dev->vals[dev->num_vals++];
388 			v->type = EV_ABS;
389 			v->code = ABS_MT_SLOT;
390 			v->value = dev->mt->slot;
391 		}
392 
393 		v = &dev->vals[dev->num_vals++];
394 		v->type = type;
395 		v->code = code;
396 		v->value = value;
397 	}
398 
399 	if (disposition & INPUT_FLUSH) {
400 		if (dev->num_vals >= 2)
401 			input_pass_values(dev, dev->vals, dev->num_vals);
402 		dev->num_vals = 0;
403 	} else if (dev->num_vals >= dev->max_vals - 2) {
404 		dev->vals[dev->num_vals++] = input_value_sync;
405 		input_pass_values(dev, dev->vals, dev->num_vals);
406 		dev->num_vals = 0;
407 	}
408 
409 }
410 
411 /**
412  * input_event() - report new input event
413  * @dev: device that generated the event
414  * @type: type of the event
415  * @code: event code
416  * @value: value of the event
417  *
418  * This function should be used by drivers implementing various input
419  * devices to report input events. See also input_inject_event().
420  *
421  * NOTE: input_event() may be safely used right after an input device has
422  * been allocated with input_allocate_device(), even before it is registered
423  * with input_register_device(), but the event will not reach any of the
424  * input handlers. Such early invocation of input_event() may be used
425  * to 'seed' the initial state of a switch or the initial position of an
426  * absolute axis, etc.
427  */
428 void input_event(struct input_dev *dev,
429 		 unsigned int type, unsigned int code, int value)
430 {
431 	unsigned long flags;
432 
433 	if (is_event_supported(type, dev->evbit, EV_MAX)) {
434 
435 		spin_lock_irqsave(&dev->event_lock, flags);
436 		input_handle_event(dev, type, code, value);
437 		spin_unlock_irqrestore(&dev->event_lock, flags);
438 	}
439 }
440 EXPORT_SYMBOL(input_event);
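
/*
 * Editor's example (hypothetical driver code, not part of this file): a
 * minimal sketch of reporting a button press and release via input_event().
 * It assumes "dev" has been set up with EV_KEY/BTN_0 capability; real drivers
 * typically use the input_report_key()/input_sync() wrappers instead.
 *
 *	input_event(dev, EV_KEY, BTN_0, 1);
 *	input_event(dev, EV_SYN, SYN_REPORT, 0);
 *	input_event(dev, EV_KEY, BTN_0, 0);
 *	input_event(dev, EV_SYN, SYN_REPORT, 0);
 */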
441 
442 /**
443  * input_inject_event() - send input event from input handler
444  * @handle: input handle to send event through
445  * @type: type of the event
446  * @code: event code
447  * @value: value of the event
448  *
449  * Similar to input_event() but will ignore the event if the device is
450  * "grabbed" and the handle injecting the event is not the one that owns
451  * the device.
452  */
453 void input_inject_event(struct input_handle *handle,
454 			unsigned int type, unsigned int code, int value)
455 {
456 	struct input_dev *dev = handle->dev;
457 	struct input_handle *grab;
458 	unsigned long flags;
459 
460 	if (is_event_supported(type, dev->evbit, EV_MAX)) {
461 		spin_lock_irqsave(&dev->event_lock, flags);
462 
463 		rcu_read_lock();
464 		grab = rcu_dereference(dev->grab);
465 		if (!grab || grab == handle)
466 			input_handle_event(dev, type, code, value);
467 		rcu_read_unlock();
468 
469 		spin_unlock_irqrestore(&dev->event_lock, flags);
470 	}
471 }
472 EXPORT_SYMBOL(input_inject_event);
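
/*
 * Editor's example (hypothetical handler code): a sketch of a handler
 * injecting an LED state change back into the device, as a keyboard handler
 * might do when updating Caps Lock. Assumes "handle" is an open handle.
 *
 *	input_inject_event(handle, EV_LED, LED_CAPSL, 1);
 *	input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
 */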
473 
474 /**
475  * input_alloc_absinfo - allocates array of input_absinfo structs
476  * @dev: the input device emitting absolute events
477  *
478  * If the device's absinfo array has already been allocated, this
479  * function will not do anything.
480  */
481 void input_alloc_absinfo(struct input_dev *dev)
482 {
483 	if (dev->absinfo)
484 		return;
485 
486 	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
487 	if (!dev->absinfo) {
488 		dev_err(dev->dev.parent ?: &dev->dev,
489 			"%s: unable to allocate memory\n", __func__);
490 		/*
491 		 * We will handle this allocation failure in
492 		 * input_register_device() when we refuse to register input
493 		 * device with ABS bits but without absinfo.
494 		 */
495 	}
496 }
497 EXPORT_SYMBOL(input_alloc_absinfo);
498 
499 void input_set_abs_params(struct input_dev *dev, unsigned int axis,
500 			  int min, int max, int fuzz, int flat)
501 {
502 	struct input_absinfo *absinfo;
503 
504 	input_alloc_absinfo(dev);
505 	if (!dev->absinfo)
506 		return;
507 
508 	absinfo = &dev->absinfo[axis];
509 	absinfo->minimum = min;
510 	absinfo->maximum = max;
511 	absinfo->fuzz = fuzz;
512 	absinfo->flat = flat;
513 
514 	__set_bit(EV_ABS, dev->evbit);
515 	__set_bit(axis, dev->absbit);
516 }
517 EXPORT_SYMBOL(input_set_abs_params);
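
/*
 * Editor's example (hypothetical touchscreen driver): a sketch of describing
 * absolute axes with input_set_abs_params(). The ranges and fuzz values are
 * made up for illustration.
 *
 *	input_set_abs_params(dev, ABS_X, 0, 4095, 4, 0);
 *	input_set_abs_params(dev, ABS_Y, 0, 4095, 4, 0);
 *	input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
 */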
518 
519 
520 /**
521  * input_grab_device - grabs device for exclusive use
522  * @handle: input handle that wants to own the device
523  *
524  * When a device is grabbed by an input handle, all events generated by
525  * the device are delivered only to this handle. Also, events injected
526  * by other input handles are ignored while the device is grabbed.
527  */
528 int input_grab_device(struct input_handle *handle)
529 {
530 	struct input_dev *dev = handle->dev;
531 	int retval;
532 
533 	retval = mutex_lock_interruptible(&dev->mutex);
534 	if (retval)
535 		return retval;
536 
537 	if (dev->grab) {
538 		retval = -EBUSY;
539 		goto out;
540 	}
541 
542 	rcu_assign_pointer(dev->grab, handle);
543 
544  out:
545 	mutex_unlock(&dev->mutex);
546 	return retval;
547 }
548 EXPORT_SYMBOL(input_grab_device);
549 
550 static void __input_release_device(struct input_handle *handle)
551 {
552 	struct input_dev *dev = handle->dev;
553 	struct input_handle *grabber;
554 
555 	grabber = rcu_dereference_protected(dev->grab,
556 					    lockdep_is_held(&dev->mutex));
557 	if (grabber == handle) {
558 		rcu_assign_pointer(dev->grab, NULL);
559 		/* Make sure input_pass_event() notices that grab is gone */
560 		synchronize_rcu();
561 
562 		list_for_each_entry(handle, &dev->h_list, d_node)
563 			if (handle->open && handle->handler->start)
564 				handle->handler->start(handle);
565 	}
566 }
567 
568 /**
569  * input_release_device - release previously grabbed device
570  * @handle: input handle that owns the device
571  *
572  * Releases a previously grabbed device so that other input handles can
573  * start receiving input events. Upon release all handlers attached
574  * to the device have their start() method called so they have a chance
575  * to synchronize device state with the rest of the system.
576  */
577 void input_release_device(struct input_handle *handle)
578 {
579 	struct input_dev *dev = handle->dev;
580 
581 	mutex_lock(&dev->mutex);
582 	__input_release_device(handle);
583 	mutex_unlock(&dev->mutex);
584 }
585 EXPORT_SYMBOL(input_release_device);
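
/*
 * Editor's example (hypothetical handler code): a sketch of taking and
 * dropping an exclusive grab, similar to what evdev does for EVIOCGRAB.
 * Assumes "handle" is a registered and open handle.
 *
 *	error = input_grab_device(handle);
 *	if (!error) {
 *		// events now reach only this handle
 *		...
 *		input_release_device(handle);
 *	}
 */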
586 
587 /**
588  * input_open_device - open input device
589  * @handle: handle through which device is being accessed
590  *
591  * This function should be called by input handlers when they
592  * want to start receiving events from a given input device.
593  */
594 int input_open_device(struct input_handle *handle)
595 {
596 	struct input_dev *dev = handle->dev;
597 	int retval;
598 
599 	retval = mutex_lock_interruptible(&dev->mutex);
600 	if (retval)
601 		return retval;
602 
603 	if (dev->going_away) {
604 		retval = -ENODEV;
605 		goto out;
606 	}
607 
608 	handle->open++;
609 
610 	if (!dev->users++ && dev->open)
611 		retval = dev->open(dev);
612 
613 	if (retval) {
614 		dev->users--;
615 		if (!--handle->open) {
616 			/*
617 			 * Make sure we are not delivering any more events
618 			 * through this handle
619 			 */
620 			synchronize_rcu();
621 		}
622 	}
623 
624  out:
625 	mutex_unlock(&dev->mutex);
626 	return retval;
627 }
628 EXPORT_SYMBOL(input_open_device);
629 
630 int input_flush_device(struct input_handle *handle, struct file *file)
631 {
632 	struct input_dev *dev = handle->dev;
633 	int retval;
634 
635 	retval = mutex_lock_interruptible(&dev->mutex);
636 	if (retval)
637 		return retval;
638 
639 	if (dev->flush)
640 		retval = dev->flush(dev, file);
641 
642 	mutex_unlock(&dev->mutex);
643 	return retval;
644 }
645 EXPORT_SYMBOL(input_flush_device);
646 
647 /**
648  * input_close_device - close input device
649  * @handle: handle through which device is being accessed
650  *
651  * This function should be called by input handlers when they
652  * want to stop receiving events from a given input device.
653  */
654 void input_close_device(struct input_handle *handle)
655 {
656 	struct input_dev *dev = handle->dev;
657 
658 	mutex_lock(&dev->mutex);
659 
660 	__input_release_device(handle);
661 
662 	if (!--dev->users && dev->close)
663 		dev->close(dev);
664 
665 	if (!--handle->open) {
666 		/*
667 		 * synchronize_rcu() makes sure that input_pass_event()
668 		 * completed and that no more input events are delivered
669 		 * through this handle
670 		 */
671 		synchronize_rcu();
672 	}
673 
674 	mutex_unlock(&dev->mutex);
675 }
676 EXPORT_SYMBOL(input_close_device);
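
/*
 * Editor's example (hypothetical handler code): a sketch of how a handler
 * pairs input_open_device() and input_close_device() around the period it
 * wants to receive events, e.g. from its character device open()/release()
 * paths. Assumes "handle" was registered with input_register_handle().
 *
 *	error = input_open_device(handle);	// start receiving events
 *	if (error)
 *		return error;
 *	...
 *	input_close_device(handle);		// stop receiving events
 */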
677 
678 /*
679  * Simulate keyup events for all keys that are marked as pressed.
680  * The function must be called with dev->event_lock held.
681  */
682 static void input_dev_release_keys(struct input_dev *dev)
683 {
684 	bool need_sync = false;
685 	int code;
686 
687 	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
688 		for_each_set_bit(code, dev->key, KEY_CNT) {
689 			input_pass_event(dev, EV_KEY, code, 0);
690 			need_sync = true;
691 		}
692 
693 		if (need_sync)
694 			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
695 
696 		memset(dev->key, 0, sizeof(dev->key));
697 	}
698 }
699 
700 /*
701  * Prepare device for unregistering
702  */
703 static void input_disconnect_device(struct input_dev *dev)
704 {
705 	struct input_handle *handle;
706 
707 	/*
708 	 * Mark device as going away. Note that we take dev->mutex here
709 	 * not to protect access to dev->going_away but rather to ensure
710 	 * that there are no threads in the middle of input_open_device()
711 	 */
712 	mutex_lock(&dev->mutex);
713 	dev->going_away = true;
714 	mutex_unlock(&dev->mutex);
715 
716 	spin_lock_irq(&dev->event_lock);
717 
718 	/*
719 	 * Simulate keyup events for all pressed keys so that handlers
720 	 * are not left with "stuck" keys. The driver may continue to
721 	 * generate events even after we are done here, but they will not
722 	 * reach any handlers.
723 	 */
724 	input_dev_release_keys(dev);
725 
726 	list_for_each_entry(handle, &dev->h_list, d_node)
727 		handle->open = 0;
728 
729 	spin_unlock_irq(&dev->event_lock);
730 }
731 
732 /**
733  * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
734  * @ke: keymap entry containing scancode to be converted.
735  * @scancode: pointer to the location where converted scancode should
736  *	be stored.
737  *
738  * This function is used to convert a scancode stored in &struct input_keymap_entry
739  * into the scalar form understood by legacy keymap handling methods. These
740  * methods expect scancodes to be represented as 'unsigned int'.
741  */
742 int input_scancode_to_scalar(const struct input_keymap_entry *ke,
743 			     unsigned int *scancode)
744 {
745 	switch (ke->len) {
746 	case 1:
747 		*scancode = *((u8 *)ke->scancode);
748 		break;
749 
750 	case 2:
751 		*scancode = *((u16 *)ke->scancode);
752 		break;
753 
754 	case 4:
755 		*scancode = *((u32 *)ke->scancode);
756 		break;
757 
758 	default:
759 		return -EINVAL;
760 	}
761 
762 	return 0;
763 }
764 EXPORT_SYMBOL(input_scancode_to_scalar);
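
/*
 * Editor's example (hypothetical values): a sketch of converting a 2-byte
 * scancode stored in an input_keymap_entry to its scalar form.
 *
 *	struct input_keymap_entry ke = { .len = sizeof(u16) };
 *	u16 hw_code = 0x3b;
 *	unsigned int scancode;
 *
 *	memcpy(ke.scancode, &hw_code, sizeof(hw_code));
 *	if (!input_scancode_to_scalar(&ke, &scancode))
 *		pr_debug("scalar scancode: %#x\n", scancode);	// prints 0x3b
 */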
765 
766 /*
767  * Those routines handle the default case where no [gs]etkeycode() is
768  * defined. In this case, an array indexed by the scancode is used.
769  */
770 
771 static unsigned int input_fetch_keycode(struct input_dev *dev,
772 					unsigned int index)
773 {
774 	switch (dev->keycodesize) {
775 	case 1:
776 		return ((u8 *)dev->keycode)[index];
777 
778 	case 2:
779 		return ((u16 *)dev->keycode)[index];
780 
781 	default:
782 		return ((u32 *)dev->keycode)[index];
783 	}
784 }
785 
786 static int input_default_getkeycode(struct input_dev *dev,
787 				    struct input_keymap_entry *ke)
788 {
789 	unsigned int index;
790 	int error;
791 
792 	if (!dev->keycodesize)
793 		return -EINVAL;
794 
795 	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
796 		index = ke->index;
797 	else {
798 		error = input_scancode_to_scalar(ke, &index);
799 		if (error)
800 			return error;
801 	}
802 
803 	if (index >= dev->keycodemax)
804 		return -EINVAL;
805 
806 	ke->keycode = input_fetch_keycode(dev, index);
807 	ke->index = index;
808 	ke->len = sizeof(index);
809 	memcpy(ke->scancode, &index, sizeof(index));
810 
811 	return 0;
812 }
813 
814 static int input_default_setkeycode(struct input_dev *dev,
815 				    const struct input_keymap_entry *ke,
816 				    unsigned int *old_keycode)
817 {
818 	unsigned int index;
819 	int error;
820 	int i;
821 
822 	if (!dev->keycodesize)
823 		return -EINVAL;
824 
825 	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
826 		index = ke->index;
827 	} else {
828 		error = input_scancode_to_scalar(ke, &index);
829 		if (error)
830 			return error;
831 	}
832 
833 	if (index >= dev->keycodemax)
834 		return -EINVAL;
835 
836 	if (dev->keycodesize < sizeof(ke->keycode) &&
837 			(ke->keycode >> (dev->keycodesize * 8)))
838 		return -EINVAL;
839 
840 	switch (dev->keycodesize) {
841 		case 1: {
842 			u8 *k = (u8 *)dev->keycode;
843 			*old_keycode = k[index];
844 			k[index] = ke->keycode;
845 			break;
846 		}
847 		case 2: {
848 			u16 *k = (u16 *)dev->keycode;
849 			*old_keycode = k[index];
850 			k[index] = ke->keycode;
851 			break;
852 		}
853 		default: {
854 			u32 *k = (u32 *)dev->keycode;
855 			*old_keycode = k[index];
856 			k[index] = ke->keycode;
857 			break;
858 		}
859 	}
860 
861 	__clear_bit(*old_keycode, dev->keybit);
862 	__set_bit(ke->keycode, dev->keybit);
863 
864 	for (i = 0; i < dev->keycodemax; i++) {
865 		if (input_fetch_keycode(dev, i) == *old_keycode) {
866 			__set_bit(*old_keycode, dev->keybit);
867 			break; /* Setting the bit twice is useless, so break */
868 		}
869 	}
870 
871 	return 0;
872 }
873 
874 /**
875  * input_get_keycode - retrieve keycode currently mapped to a given scancode
876  * @dev: input device whose keymap is being queried
877  * @ke: keymap entry
878  *
879  * This function should be called by anyone interested in retrieving the
880  * current keymap. Presently evdev handlers use it.
881  */
882 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
883 {
884 	unsigned long flags;
885 	int retval;
886 
887 	spin_lock_irqsave(&dev->event_lock, flags);
888 	retval = dev->getkeycode(dev, ke);
889 	spin_unlock_irqrestore(&dev->event_lock, flags);
890 
891 	return retval;
892 }
893 EXPORT_SYMBOL(input_get_keycode);
894 
895 /**
896  * input_set_keycode - attribute a keycode to a given scancode
897  * @dev: input device whose keymap is being updated
898  * @ke: new keymap entry
899  *
900  * This function should be called by anyone needing to update the current
901  * keymap. Presently keyboard and evdev handlers use it.
902  */
903 int input_set_keycode(struct input_dev *dev,
904 		      const struct input_keymap_entry *ke)
905 {
906 	unsigned long flags;
907 	unsigned int old_keycode;
908 	int retval;
909 
910 	if (ke->keycode > KEY_MAX)
911 		return -EINVAL;
912 
913 	spin_lock_irqsave(&dev->event_lock, flags);
914 
915 	retval = dev->setkeycode(dev, ke, &old_keycode);
916 	if (retval)
917 		goto out;
918 
919 	/* Make sure KEY_RESERVED did not get enabled. */
920 	__clear_bit(KEY_RESERVED, dev->keybit);
921 
922 	/*
923 	 * Simulate keyup event if keycode is not present
924 	 * in the keymap anymore
925 	 */
926 	if (test_bit(EV_KEY, dev->evbit) &&
927 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
928 	    __test_and_clear_bit(old_keycode, dev->key)) {
929 		struct input_value vals[] =  {
930 			{ EV_KEY, old_keycode, 0 },
931 			input_value_sync
932 		};
933 
934 		input_pass_values(dev, vals, ARRAY_SIZE(vals));
935 	}
936 
937  out:
938 	spin_unlock_irqrestore(&dev->event_lock, flags);
939 
940 	return retval;
941 }
942 EXPORT_SYMBOL(input_set_keycode);
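
/*
 * Editor's example (hypothetical remapping): a sketch of remapping a raw
 * scancode to KEY_VOLUMEUP with input_set_keycode(). The scancode value is
 * made up for illustration.
 *
 *	struct input_keymap_entry ke = {
 *		.len = sizeof(u32),
 *		.keycode = KEY_VOLUMEUP,
 *	};
 *	u32 scancode = 0x1e;
 *	int error;
 *
 *	memcpy(ke.scancode, &scancode, sizeof(scancode));
 *	error = input_set_keycode(dev, &ke);
 */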
943 
944 bool input_match_device_id(const struct input_dev *dev,
945 			   const struct input_device_id *id)
946 {
947 	if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
948 		if (id->bustype != dev->id.bustype)
949 			return false;
950 
951 	if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
952 		if (id->vendor != dev->id.vendor)
953 			return false;
954 
955 	if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
956 		if (id->product != dev->id.product)
957 			return false;
958 
959 	if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
960 		if (id->version != dev->id.version)
961 			return false;
962 
963 	if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
964 	    !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
965 	    !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
966 	    !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
967 	    !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
968 	    !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
969 	    !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
970 	    !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
971 	    !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
972 	    !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
973 		return false;
974 	}
975 
976 	return true;
977 }
978 EXPORT_SYMBOL(input_match_device_id);
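
/*
 * Editor's example (hypothetical handler): a sketch of an id table a handler
 * could use to match any device reporting EV_KEY events; the table must be
 * terminated with an empty entry.
 *
 *	static const struct input_device_id example_ids[] = {
 *		{
 *			.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
 *			.evbit = { BIT_MASK(EV_KEY) },
 *		},
 *		{ },	// terminating entry
 *	};
 */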
979 
980 static const struct input_device_id *input_match_device(struct input_handler *handler,
981 							struct input_dev *dev)
982 {
983 	const struct input_device_id *id;
984 
985 	for (id = handler->id_table; id->flags || id->driver_info; id++) {
986 		if (input_match_device_id(dev, id) &&
987 		    (!handler->match || handler->match(handler, dev))) {
988 			return id;
989 		}
990 	}
991 
992 	return NULL;
993 }
994 
995 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
996 {
997 	const struct input_device_id *id;
998 	int error;
999 
1000 	id = input_match_device(handler, dev);
1001 	if (!id)
1002 		return -ENODEV;
1003 
1004 	error = handler->connect(handler, dev, id);
1005 	if (error && error != -ENODEV)
1006 		pr_err("failed to attach handler %s to device %s, error: %d\n",
1007 		       handler->name, kobject_name(&dev->dev.kobj), error);
1008 
1009 	return error;
1010 }
1011 
1012 #ifdef CONFIG_COMPAT
1013 
1014 static int input_bits_to_string(char *buf, int buf_size,
1015 				unsigned long bits, bool skip_empty)
1016 {
1017 	int len = 0;
1018 
1019 	if (in_compat_syscall()) {
1020 		u32 dword = bits >> 32;
1021 		if (dword || !skip_empty)
1022 			len += snprintf(buf, buf_size, "%x ", dword);
1023 
1024 		dword = bits & 0xffffffffUL;
1025 		if (dword || !skip_empty || len)
1026 			len += snprintf(buf + len, max(buf_size - len, 0),
1027 					"%x", dword);
1028 	} else {
1029 		if (bits || !skip_empty)
1030 			len += snprintf(buf, buf_size, "%lx", bits);
1031 	}
1032 
1033 	return len;
1034 }
1035 
1036 #else /* !CONFIG_COMPAT */
1037 
1038 static int input_bits_to_string(char *buf, int buf_size,
1039 				unsigned long bits, bool skip_empty)
1040 {
1041 	return bits || !skip_empty ?
1042 		snprintf(buf, buf_size, "%lx", bits) : 0;
1043 }
1044 
1045 #endif
1046 
1047 #ifdef CONFIG_PROC_FS
1048 
1049 static struct proc_dir_entry *proc_bus_input_dir;
1050 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1051 static int input_devices_state;
1052 
1053 static inline void input_wakeup_procfs_readers(void)
1054 {
1055 	input_devices_state++;
1056 	wake_up(&input_devices_poll_wait);
1057 }
1058 
1059 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
1060 {
1061 	poll_wait(file, &input_devices_poll_wait, wait);
1062 	if (file->f_version != input_devices_state) {
1063 		file->f_version = input_devices_state;
1064 		return EPOLLIN | EPOLLRDNORM;
1065 	}
1066 
1067 	return 0;
1068 }
1069 
1070 union input_seq_state {
1071 	struct {
1072 		unsigned short pos;
1073 		bool mutex_acquired;
1074 	};
1075 	void *p;
1076 };
1077 
1078 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1079 {
1080 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1081 	int error;
1082 
1083 	/* We need to fit into seq->private pointer */
1084 	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1085 
1086 	error = mutex_lock_interruptible(&input_mutex);
1087 	if (error) {
1088 		state->mutex_acquired = false;
1089 		return ERR_PTR(error);
1090 	}
1091 
1092 	state->mutex_acquired = true;
1093 
1094 	return seq_list_start(&input_dev_list, *pos);
1095 }
1096 
1097 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1098 {
1099 	return seq_list_next(v, &input_dev_list, pos);
1100 }
1101 
1102 static void input_seq_stop(struct seq_file *seq, void *v)
1103 {
1104 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1105 
1106 	if (state->mutex_acquired)
1107 		mutex_unlock(&input_mutex);
1108 }
1109 
1110 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1111 				   unsigned long *bitmap, int max)
1112 {
1113 	int i;
1114 	bool skip_empty = true;
1115 	char buf[18];
1116 
1117 	seq_printf(seq, "B: %s=", name);
1118 
1119 	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1120 		if (input_bits_to_string(buf, sizeof(buf),
1121 					 bitmap[i], skip_empty)) {
1122 			skip_empty = false;
1123 			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1124 		}
1125 	}
1126 
1127 	/*
1128 	 * If no output was produced print a single 0.
1129 	 */
1130 	if (skip_empty)
1131 		seq_putc(seq, '0');
1132 
1133 	seq_putc(seq, '\n');
1134 }
1135 
1136 static int input_devices_seq_show(struct seq_file *seq, void *v)
1137 {
1138 	struct input_dev *dev = container_of(v, struct input_dev, node);
1139 	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1140 	struct input_handle *handle;
1141 
1142 	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1143 		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1144 
1145 	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1146 	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1147 	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1148 	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1149 	seq_puts(seq, "H: Handlers=");
1150 
1151 	list_for_each_entry(handle, &dev->h_list, d_node)
1152 		seq_printf(seq, "%s ", handle->name);
1153 	seq_putc(seq, '\n');
1154 
1155 	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1156 
1157 	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1158 	if (test_bit(EV_KEY, dev->evbit))
1159 		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1160 	if (test_bit(EV_REL, dev->evbit))
1161 		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1162 	if (test_bit(EV_ABS, dev->evbit))
1163 		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1164 	if (test_bit(EV_MSC, dev->evbit))
1165 		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1166 	if (test_bit(EV_LED, dev->evbit))
1167 		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1168 	if (test_bit(EV_SND, dev->evbit))
1169 		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1170 	if (test_bit(EV_FF, dev->evbit))
1171 		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1172 	if (test_bit(EV_SW, dev->evbit))
1173 		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1174 
1175 	seq_putc(seq, '\n');
1176 
1177 	kfree(path);
1178 	return 0;
1179 }
1180 
1181 static const struct seq_operations input_devices_seq_ops = {
1182 	.start	= input_devices_seq_start,
1183 	.next	= input_devices_seq_next,
1184 	.stop	= input_seq_stop,
1185 	.show	= input_devices_seq_show,
1186 };
1187 
1188 static int input_proc_devices_open(struct inode *inode, struct file *file)
1189 {
1190 	return seq_open(file, &input_devices_seq_ops);
1191 }
1192 
1193 static const struct file_operations input_devices_fileops = {
1194 	.owner		= THIS_MODULE,
1195 	.open		= input_proc_devices_open,
1196 	.poll		= input_proc_devices_poll,
1197 	.read		= seq_read,
1198 	.llseek		= seq_lseek,
1199 	.release	= seq_release,
1200 };
1201 
1202 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1203 {
1204 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1205 	int error;
1206 
1207 	/* We need to fit into seq->private pointer */
1208 	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1209 
1210 	error = mutex_lock_interruptible(&input_mutex);
1211 	if (error) {
1212 		state->mutex_acquired = false;
1213 		return ERR_PTR(error);
1214 	}
1215 
1216 	state->mutex_acquired = true;
1217 	state->pos = *pos;
1218 
1219 	return seq_list_start(&input_handler_list, *pos);
1220 }
1221 
1222 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1223 {
1224 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1225 
1226 	state->pos = *pos + 1;
1227 	return seq_list_next(v, &input_handler_list, pos);
1228 }
1229 
1230 static int input_handlers_seq_show(struct seq_file *seq, void *v)
1231 {
1232 	struct input_handler *handler = container_of(v, struct input_handler, node);
1233 	union input_seq_state *state = (union input_seq_state *)&seq->private;
1234 
1235 	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1236 	if (handler->filter)
1237 		seq_puts(seq, " (filter)");
1238 	if (handler->legacy_minors)
1239 		seq_printf(seq, " Minor=%d", handler->minor);
1240 	seq_putc(seq, '\n');
1241 
1242 	return 0;
1243 }
1244 
1245 static const struct seq_operations input_handlers_seq_ops = {
1246 	.start	= input_handlers_seq_start,
1247 	.next	= input_handlers_seq_next,
1248 	.stop	= input_seq_stop,
1249 	.show	= input_handlers_seq_show,
1250 };
1251 
1252 static int input_proc_handlers_open(struct inode *inode, struct file *file)
1253 {
1254 	return seq_open(file, &input_handlers_seq_ops);
1255 }
1256 
1257 static const struct file_operations input_handlers_fileops = {
1258 	.owner		= THIS_MODULE,
1259 	.open		= input_proc_handlers_open,
1260 	.read		= seq_read,
1261 	.llseek		= seq_lseek,
1262 	.release	= seq_release,
1263 };
1264 
1265 static int __init input_proc_init(void)
1266 {
1267 	struct proc_dir_entry *entry;
1268 
1269 	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1270 	if (!proc_bus_input_dir)
1271 		return -ENOMEM;
1272 
1273 	entry = proc_create("devices", 0, proc_bus_input_dir,
1274 			    &input_devices_fileops);
1275 	if (!entry)
1276 		goto fail1;
1277 
1278 	entry = proc_create("handlers", 0, proc_bus_input_dir,
1279 			    &input_handlers_fileops);
1280 	if (!entry)
1281 		goto fail2;
1282 
1283 	return 0;
1284 
1285  fail2:	remove_proc_entry("devices", proc_bus_input_dir);
1286  fail1: remove_proc_entry("bus/input", NULL);
1287 	return -ENOMEM;
1288 }
1289 
1290 static void input_proc_exit(void)
1291 {
1292 	remove_proc_entry("devices", proc_bus_input_dir);
1293 	remove_proc_entry("handlers", proc_bus_input_dir);
1294 	remove_proc_entry("bus/input", NULL);
1295 }
1296 
1297 #else /* !CONFIG_PROC_FS */
1298 static inline void input_wakeup_procfs_readers(void) { }
1299 static inline int input_proc_init(void) { return 0; }
1300 static inline void input_proc_exit(void) { }
1301 #endif
1302 
1303 #define INPUT_DEV_STRING_ATTR_SHOW(name)				\
1304 static ssize_t input_dev_show_##name(struct device *dev,		\
1305 				     struct device_attribute *attr,	\
1306 				     char *buf)				\
1307 {									\
1308 	struct input_dev *input_dev = to_input_dev(dev);		\
1309 									\
1310 	return scnprintf(buf, PAGE_SIZE, "%s\n",			\
1311 			 input_dev->name ? input_dev->name : "");	\
1312 }									\
1313 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1314 
1315 INPUT_DEV_STRING_ATTR_SHOW(name);
1316 INPUT_DEV_STRING_ATTR_SHOW(phys);
1317 INPUT_DEV_STRING_ATTR_SHOW(uniq);
1318 
1319 static int input_print_modalias_bits(char *buf, int size,
1320 				     char name, unsigned long *bm,
1321 				     unsigned int min_bit, unsigned int max_bit)
1322 {
1323 	int len = 0, i;
1324 
1325 	len += snprintf(buf, max(size, 0), "%c", name);
1326 	for (i = min_bit; i < max_bit; i++)
1327 		if (bm[BIT_WORD(i)] & BIT_MASK(i))
1328 			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
1329 	return len;
1330 }
1331 
1332 static int input_print_modalias(char *buf, int size, struct input_dev *id,
1333 				int add_cr)
1334 {
1335 	int len;
1336 
1337 	len = snprintf(buf, max(size, 0),
1338 		       "input:b%04Xv%04Xp%04Xe%04X-",
1339 		       id->id.bustype, id->id.vendor,
1340 		       id->id.product, id->id.version);
1341 
1342 	len += input_print_modalias_bits(buf + len, size - len,
1343 				'e', id->evbit, 0, EV_MAX);
1344 	len += input_print_modalias_bits(buf + len, size - len,
1345 				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1346 	len += input_print_modalias_bits(buf + len, size - len,
1347 				'r', id->relbit, 0, REL_MAX);
1348 	len += input_print_modalias_bits(buf + len, size - len,
1349 				'a', id->absbit, 0, ABS_MAX);
1350 	len += input_print_modalias_bits(buf + len, size - len,
1351 				'm', id->mscbit, 0, MSC_MAX);
1352 	len += input_print_modalias_bits(buf + len, size - len,
1353 				'l', id->ledbit, 0, LED_MAX);
1354 	len += input_print_modalias_bits(buf + len, size - len,
1355 				's', id->sndbit, 0, SND_MAX);
1356 	len += input_print_modalias_bits(buf + len, size - len,
1357 				'f', id->ffbit, 0, FF_MAX);
1358 	len += input_print_modalias_bits(buf + len, size - len,
1359 				'w', id->swbit, 0, SW_MAX);
1360 
1361 	if (add_cr)
1362 		len += snprintf(buf + len, max(size - len, 0), "\n");
1363 
1364 	return len;
1365 }
1366 
1367 static ssize_t input_dev_show_modalias(struct device *dev,
1368 				       struct device_attribute *attr,
1369 				       char *buf)
1370 {
1371 	struct input_dev *id = to_input_dev(dev);
1372 	ssize_t len;
1373 
1374 	len = input_print_modalias(buf, PAGE_SIZE, id, 1);
1375 
1376 	return min_t(int, len, PAGE_SIZE);
1377 }
1378 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1379 
1380 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1381 			      int max, int add_cr);
1382 
1383 static ssize_t input_dev_show_properties(struct device *dev,
1384 					 struct device_attribute *attr,
1385 					 char *buf)
1386 {
1387 	struct input_dev *input_dev = to_input_dev(dev);
1388 	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1389 				     INPUT_PROP_MAX, true);
1390 	return min_t(int, len, PAGE_SIZE);
1391 }
1392 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1393 
1394 static struct attribute *input_dev_attrs[] = {
1395 	&dev_attr_name.attr,
1396 	&dev_attr_phys.attr,
1397 	&dev_attr_uniq.attr,
1398 	&dev_attr_modalias.attr,
1399 	&dev_attr_properties.attr,
1400 	NULL
1401 };
1402 
1403 static const struct attribute_group input_dev_attr_group = {
1404 	.attrs	= input_dev_attrs,
1405 };
1406 
1407 #define INPUT_DEV_ID_ATTR(name)						\
1408 static ssize_t input_dev_show_id_##name(struct device *dev,		\
1409 					struct device_attribute *attr,	\
1410 					char *buf)			\
1411 {									\
1412 	struct input_dev *input_dev = to_input_dev(dev);		\
1413 	return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name);	\
1414 }									\
1415 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1416 
1417 INPUT_DEV_ID_ATTR(bustype);
1418 INPUT_DEV_ID_ATTR(vendor);
1419 INPUT_DEV_ID_ATTR(product);
1420 INPUT_DEV_ID_ATTR(version);
1421 
1422 static struct attribute *input_dev_id_attrs[] = {
1423 	&dev_attr_bustype.attr,
1424 	&dev_attr_vendor.attr,
1425 	&dev_attr_product.attr,
1426 	&dev_attr_version.attr,
1427 	NULL
1428 };
1429 
1430 static const struct attribute_group input_dev_id_attr_group = {
1431 	.name	= "id",
1432 	.attrs	= input_dev_id_attrs,
1433 };
1434 
1435 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1436 			      int max, int add_cr)
1437 {
1438 	int i;
1439 	int len = 0;
1440 	bool skip_empty = true;
1441 
1442 	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1443 		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1444 					    bitmap[i], skip_empty);
1445 		if (len) {
1446 			skip_empty = false;
1447 			if (i > 0)
1448 				len += snprintf(buf + len, max(buf_size - len, 0), " ");
1449 		}
1450 	}
1451 
1452 	/*
1453 	 * If no output was produced print a single 0.
1454 	 */
1455 	if (len == 0)
1456 		len = snprintf(buf, buf_size, "%d", 0);
1457 
1458 	if (add_cr)
1459 		len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1460 
1461 	return len;
1462 }
1463 
1464 #define INPUT_DEV_CAP_ATTR(ev, bm)					\
1465 static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
1466 				       struct device_attribute *attr,	\
1467 				       char *buf)			\
1468 {									\
1469 	struct input_dev *input_dev = to_input_dev(dev);		\
1470 	int len = input_print_bitmap(buf, PAGE_SIZE,			\
1471 				     input_dev->bm##bit, ev##_MAX,	\
1472 				     true);				\
1473 	return min_t(int, len, PAGE_SIZE);				\
1474 }									\
1475 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1476 
1477 INPUT_DEV_CAP_ATTR(EV, ev);
1478 INPUT_DEV_CAP_ATTR(KEY, key);
1479 INPUT_DEV_CAP_ATTR(REL, rel);
1480 INPUT_DEV_CAP_ATTR(ABS, abs);
1481 INPUT_DEV_CAP_ATTR(MSC, msc);
1482 INPUT_DEV_CAP_ATTR(LED, led);
1483 INPUT_DEV_CAP_ATTR(SND, snd);
1484 INPUT_DEV_CAP_ATTR(FF, ff);
1485 INPUT_DEV_CAP_ATTR(SW, sw);
1486 
1487 static struct attribute *input_dev_caps_attrs[] = {
1488 	&dev_attr_ev.attr,
1489 	&dev_attr_key.attr,
1490 	&dev_attr_rel.attr,
1491 	&dev_attr_abs.attr,
1492 	&dev_attr_msc.attr,
1493 	&dev_attr_led.attr,
1494 	&dev_attr_snd.attr,
1495 	&dev_attr_ff.attr,
1496 	&dev_attr_sw.attr,
1497 	NULL
1498 };
1499 
1500 static const struct attribute_group input_dev_caps_attr_group = {
1501 	.name	= "capabilities",
1502 	.attrs	= input_dev_caps_attrs,
1503 };
1504 
1505 static const struct attribute_group *input_dev_attr_groups[] = {
1506 	&input_dev_attr_group,
1507 	&input_dev_id_attr_group,
1508 	&input_dev_caps_attr_group,
1509 	NULL
1510 };
1511 
1512 static void input_dev_release(struct device *device)
1513 {
1514 	struct input_dev *dev = to_input_dev(device);
1515 
1516 	input_ff_destroy(dev);
1517 	input_mt_destroy_slots(dev);
1518 	kfree(dev->absinfo);
1519 	kfree(dev->vals);
1520 	kfree(dev);
1521 
1522 	module_put(THIS_MODULE);
1523 }
1524 
1525 /*
1526  * Input uevent interface - loading event handlers based on
1527  * device bitfields.
1528  */
1529 static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1530 				   const char *name, unsigned long *bitmap, int max)
1531 {
1532 	int len;
1533 
1534 	if (add_uevent_var(env, "%s", name))
1535 		return -ENOMEM;
1536 
1537 	len = input_print_bitmap(&env->buf[env->buflen - 1],
1538 				 sizeof(env->buf) - env->buflen,
1539 				 bitmap, max, false);
1540 	if (len >= (sizeof(env->buf) - env->buflen))
1541 		return -ENOMEM;
1542 
1543 	env->buflen += len;
1544 	return 0;
1545 }
1546 
1547 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1548 					 struct input_dev *dev)
1549 {
1550 	int len;
1551 
1552 	if (add_uevent_var(env, "MODALIAS="))
1553 		return -ENOMEM;
1554 
1555 	len = input_print_modalias(&env->buf[env->buflen - 1],
1556 				   sizeof(env->buf) - env->buflen,
1557 				   dev, 0);
1558 	if (len >= (sizeof(env->buf) - env->buflen))
1559 		return -ENOMEM;
1560 
1561 	env->buflen += len;
1562 	return 0;
1563 }
1564 
1565 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
1566 	do {								\
1567 		int err = add_uevent_var(env, fmt, val);		\
1568 		if (err)						\
1569 			return err;					\
1570 	} while (0)
1571 
1572 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
1573 	do {								\
1574 		int err = input_add_uevent_bm_var(env, name, bm, max);	\
1575 		if (err)						\
1576 			return err;					\
1577 	} while (0)
1578 
1579 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
1580 	do {								\
1581 		int err = input_add_uevent_modalias_var(env, dev);	\
1582 		if (err)						\
1583 			return err;					\
1584 	} while (0)
1585 
1586 static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1587 {
1588 	struct input_dev *dev = to_input_dev(device);
1589 
1590 	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1591 				dev->id.bustype, dev->id.vendor,
1592 				dev->id.product, dev->id.version);
1593 	if (dev->name)
1594 		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1595 	if (dev->phys)
1596 		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1597 	if (dev->uniq)
1598 		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1599 
1600 	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1601 
1602 	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1603 	if (test_bit(EV_KEY, dev->evbit))
1604 		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1605 	if (test_bit(EV_REL, dev->evbit))
1606 		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1607 	if (test_bit(EV_ABS, dev->evbit))
1608 		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1609 	if (test_bit(EV_MSC, dev->evbit))
1610 		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1611 	if (test_bit(EV_LED, dev->evbit))
1612 		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1613 	if (test_bit(EV_SND, dev->evbit))
1614 		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1615 	if (test_bit(EV_FF, dev->evbit))
1616 		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1617 	if (test_bit(EV_SW, dev->evbit))
1618 		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1619 
1620 	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1621 
1622 	return 0;
1623 }
1624 
1625 #define INPUT_DO_TOGGLE(dev, type, bits, on)				\
1626 	do {								\
1627 		int i;							\
1628 		bool active;						\
1629 									\
1630 		if (!test_bit(EV_##type, dev->evbit))			\
1631 			break;						\
1632 									\
1633 		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
1634 			active = test_bit(i, dev->bits);		\
1635 			if (!active && !on)				\
1636 				continue;				\
1637 									\
1638 			dev->event(dev, EV_##type, i, on ? active : 0);	\
1639 		}							\
1640 	} while (0)
1641 
1642 static void input_dev_toggle(struct input_dev *dev, bool activate)
1643 {
1644 	if (!dev->event)
1645 		return;
1646 
1647 	INPUT_DO_TOGGLE(dev, LED, led, activate);
1648 	INPUT_DO_TOGGLE(dev, SND, snd, activate);
1649 
1650 	if (activate && test_bit(EV_REP, dev->evbit)) {
1651 		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1652 		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1653 	}
1654 }
1655 
1656 /**
1657  * input_reset_device() - reset/restore the state of input device
1658  * @dev: input device whose state needs to be reset
1659  *
1660  * This function tries to reset the state of an opened input device and
1661  * bring its internal state and the hardware state in sync with each other.
1662  * We mark all keys as released, restore LED state, repeat rate, etc.
1663  */
1664 void input_reset_device(struct input_dev *dev)
1665 {
1666 	unsigned long flags;
1667 
1668 	mutex_lock(&dev->mutex);
1669 	spin_lock_irqsave(&dev->event_lock, flags);
1670 
1671 	input_dev_toggle(dev, true);
1672 	input_dev_release_keys(dev);
1673 
1674 	spin_unlock_irqrestore(&dev->event_lock, flags);
1675 	mutex_unlock(&dev->mutex);
1676 }
1677 EXPORT_SYMBOL(input_reset_device);
1678 
1679 #ifdef CONFIG_PM_SLEEP
1680 static int input_dev_suspend(struct device *dev)
1681 {
1682 	struct input_dev *input_dev = to_input_dev(dev);
1683 
1684 	spin_lock_irq(&input_dev->event_lock);
1685 
1686 	/*
1687 	 * Keys that are pressed now are unlikely to be
1688 	 * still pressed when we resume.
1689 	 */
1690 	input_dev_release_keys(input_dev);
1691 
1692 	/* Turn off LEDs and sounds, if any are active. */
1693 	input_dev_toggle(input_dev, false);
1694 
1695 	spin_unlock_irq(&input_dev->event_lock);
1696 
1697 	return 0;
1698 }
1699 
1700 static int input_dev_resume(struct device *dev)
1701 {
1702 	struct input_dev *input_dev = to_input_dev(dev);
1703 
1704 	spin_lock_irq(&input_dev->event_lock);
1705 
1706 	/* Restore state of LEDs and sounds, if any were active. */
1707 	input_dev_toggle(input_dev, true);
1708 
1709 	spin_unlock_irq(&input_dev->event_lock);
1710 
1711 	return 0;
1712 }
1713 
1714 static int input_dev_freeze(struct device *dev)
1715 {
1716 	struct input_dev *input_dev = to_input_dev(dev);
1717 
1718 	spin_lock_irq(&input_dev->event_lock);
1719 
1720 	/*
1721 	 * Keys that are pressed now are unlikely to be
1722 	 * still pressed when we resume.
1723 	 */
1724 	input_dev_release_keys(input_dev);
1725 
1726 	spin_unlock_irq(&input_dev->event_lock);
1727 
1728 	return 0;
1729 }
1730 
1731 static int input_dev_poweroff(struct device *dev)
1732 {
1733 	struct input_dev *input_dev = to_input_dev(dev);
1734 
1735 	spin_lock_irq(&input_dev->event_lock);
1736 
1737 	/* Turn off LEDs and sounds, if any are active. */
1738 	input_dev_toggle(input_dev, false);
1739 
1740 	spin_unlock_irq(&input_dev->event_lock);
1741 
1742 	return 0;
1743 }
1744 
1745 static const struct dev_pm_ops input_dev_pm_ops = {
1746 	.suspend	= input_dev_suspend,
1747 	.resume		= input_dev_resume,
1748 	.freeze		= input_dev_freeze,
1749 	.poweroff	= input_dev_poweroff,
1750 	.restore	= input_dev_resume,
1751 };
1752 #endif /* CONFIG_PM_SLEEP */
1753 
1754 static const struct device_type input_dev_type = {
1755 	.groups		= input_dev_attr_groups,
1756 	.release	= input_dev_release,
1757 	.uevent		= input_dev_uevent,
1758 #ifdef CONFIG_PM_SLEEP
1759 	.pm		= &input_dev_pm_ops,
1760 #endif
1761 };
1762 
1763 static char *input_devnode(struct device *dev, umode_t *mode)
1764 {
1765 	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1766 }
1767 
1768 struct class input_class = {
1769 	.name		= "input",
1770 	.devnode	= input_devnode,
1771 };
1772 EXPORT_SYMBOL_GPL(input_class);
1773 
1774 /**
1775  * input_allocate_device - allocate memory for new input device
1776  *
1777  * Returns prepared struct input_dev or %NULL.
1778  *
1779  * NOTE: Use input_free_device() to free devices that have not been
1780  * registered; input_unregister_device() should be used for already
1781  * registered devices.
1782  */
1783 struct input_dev *input_allocate_device(void)
1784 {
1785 	static atomic_t input_no = ATOMIC_INIT(-1);
1786 	struct input_dev *dev;
1787 
1788 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1789 	if (dev) {
1790 		dev->dev.type = &input_dev_type;
1791 		dev->dev.class = &input_class;
1792 		device_initialize(&dev->dev);
1793 		mutex_init(&dev->mutex);
1794 		spin_lock_init(&dev->event_lock);
1795 		timer_setup(&dev->timer, NULL, 0);
1796 		INIT_LIST_HEAD(&dev->h_list);
1797 		INIT_LIST_HEAD(&dev->node);
1798 
1799 		dev_set_name(&dev->dev, "input%lu",
1800 			     (unsigned long)atomic_inc_return(&input_no));
1801 
1802 		__module_get(THIS_MODULE);
1803 	}
1804 
1805 	return dev;
1806 }
1807 EXPORT_SYMBOL(input_allocate_device);
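
/*
 * Editor's example (hypothetical driver probe code): a sketch of allocating,
 * describing and registering a simple one-button device. Error handling is
 * reduced to the minimum needed to show the free/unregister rules above.
 *
 *	struct input_dev *dev;
 *	int error;
 *
 *	dev = input_allocate_device();
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	dev->name = "Example Button";
 *	dev->id.bustype = BUS_HOST;
 *	input_set_capability(dev, EV_KEY, BTN_0);
 *
 *	error = input_register_device(dev);
 *	if (error) {
 *		input_free_device(dev);
 *		return error;
 *	}
 */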
1808 
1809 struct input_devres {
1810 	struct input_dev *input;
1811 };
1812 
1813 static int devm_input_device_match(struct device *dev, void *res, void *data)
1814 {
1815 	struct input_devres *devres = res;
1816 
1817 	return devres->input == data;
1818 }
1819 
1820 static void devm_input_device_release(struct device *dev, void *res)
1821 {
1822 	struct input_devres *devres = res;
1823 	struct input_dev *input = devres->input;
1824 
1825 	dev_dbg(dev, "%s: dropping reference to %s\n",
1826 		__func__, dev_name(&input->dev));
1827 	input_put_device(input);
1828 }
1829 
1830 /**
1831  * devm_input_allocate_device - allocate managed input device
1832  * @dev: device owning the input device being created
1833  *
1834  * Returns prepared struct input_dev or %NULL.
1835  *
1836  * Managed input devices do not need to be explicitly unregistered or
1837  * freed as that is done automatically when the owner device unbinds from
1838  * its driver (or binding fails). Once a managed input device is allocated,
1839  * it is ready to be set up and registered in the same fashion as a regular
1840  * input device. There are no special devm_input_device_[un]register()
1841  * variants; the regular ones work with both managed and unmanaged devices,
1842  * should you need them. In most cases, however, a managed input device need
1843  * not be explicitly unregistered or freed.
1844  *
1845  * NOTE: the owner device is set up as the parent of the input device and
1846  * users should not override it.
1847  */
1848 struct input_dev *devm_input_allocate_device(struct device *dev)
1849 {
1850 	struct input_dev *input;
1851 	struct input_devres *devres;
1852 
1853 	devres = devres_alloc(devm_input_device_release,
1854 			      sizeof(*devres), GFP_KERNEL);
1855 	if (!devres)
1856 		return NULL;
1857 
1858 	input = input_allocate_device();
1859 	if (!input) {
1860 		devres_free(devres);
1861 		return NULL;
1862 	}
1863 
1864 	input->dev.parent = dev;
1865 	input->devres_managed = true;
1866 
1867 	devres->input = input;
1868 	devres_add(dev, devres);
1869 
1870 	return input;
1871 }
1872 EXPORT_SYMBOL(devm_input_allocate_device);
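
/*
 * Editor's example (hypothetical platform driver): a sketch of the managed
 * variant; no explicit unregister or free is needed on the error or unbind
 * paths because devres takes care of it.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "example-keys";
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		return input_register_device(input);
 *	}
 */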
1873 
1874 /**
1875  * input_free_device - free memory occupied by input_dev structure
1876  * @dev: input device to free
1877  *
1878  * This function should only be used if input_register_device()
1879  * has not been called yet or if it failed. Once a device has been registered,
1880  * use input_unregister_device(); memory will be freed once the last
1881  * reference to the device is dropped.
1882  *
1883  * Device should be allocated by input_allocate_device().
1884  *
1885  * NOTE: If there are references to the input device then the memory
1886  * will not be freed until the last reference is dropped.
1887  */
1888 void input_free_device(struct input_dev *dev)
1889 {
1890 	if (dev) {
1891 		if (dev->devres_managed)
1892 			WARN_ON(devres_destroy(dev->dev.parent,
1893 						devm_input_device_release,
1894 						devm_input_device_match,
1895 						dev));
1896 		input_put_device(dev);
1897 	}
1898 }
1899 EXPORT_SYMBOL(input_free_device);
1900 
1901 /**
1902  * input_set_capability - mark device as capable of a certain event
1903  * @dev: device that is capable of emitting or accepting event
1904  * @type: type of the event (EV_KEY, EV_REL, etc...)
1905  * @code: event code
1906  *
1907  * In addition to setting the corresponding bit in the appropriate capability
1908  * bitmap, the function also adjusts dev->evbit.
1909  */
1910 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
1911 {
1912 	switch (type) {
1913 	case EV_KEY:
1914 		__set_bit(code, dev->keybit);
1915 		break;
1916 
1917 	case EV_REL:
1918 		__set_bit(code, dev->relbit);
1919 		break;
1920 
1921 	case EV_ABS:
1922 		input_alloc_absinfo(dev);
1923 		if (!dev->absinfo)
1924 			return;
1925 
1926 		__set_bit(code, dev->absbit);
1927 		break;
1928 
1929 	case EV_MSC:
1930 		__set_bit(code, dev->mscbit);
1931 		break;
1932 
1933 	case EV_SW:
1934 		__set_bit(code, dev->swbit);
1935 		break;
1936 
1937 	case EV_LED:
1938 		__set_bit(code, dev->ledbit);
1939 		break;
1940 
1941 	case EV_SND:
1942 		__set_bit(code, dev->sndbit);
1943 		break;
1944 
1945 	case EV_FF:
1946 		__set_bit(code, dev->ffbit);
1947 		break;
1948 
1949 	case EV_PWR:
1950 		/* do nothing */
1951 		break;
1952 
1953 	default:
1954 		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
1955 		dump_stack();
1956 		return;
1957 	}
1958 
1959 	__set_bit(type, dev->evbit);
1960 }
1961 EXPORT_SYMBOL(input_set_capability);
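
/*
 * Illustrative sketch of typical capability setup for a hypothetical device
 * reporting a button, a switch and one absolute axis. For EV_ABS axes most
 * drivers use input_set_abs_params(), which fills in the axis range in
 * dev->absinfo in addition to setting the EV_ABS and axis bits; the range
 * and fuzz values below are arbitrary.
 *
 *	input_set_capability(input, EV_KEY, BTN_TOUCH);
 *	input_set_capability(input, EV_SW, SW_LID);
 *
 *	// EV_ABS + ABS_X bits, range 0..1023, fuzz 4, flat 0
 *	input_set_abs_params(input, ABS_X, 0, 1023, 4, 0);
 */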
1962 
1963 static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
1964 {
1965 	int mt_slots;
1966 	int i;
1967 	unsigned int events;
1968 
1969 	if (dev->mt) {
1970 		mt_slots = dev->mt->num_slots;
1971 	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
1972 		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
1973 			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
1974 		mt_slots = clamp(mt_slots, 2, 32);
1975 	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1976 		mt_slots = 2;
1977 	} else {
1978 		mt_slots = 0;
1979 	}
1980 
1981 	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
1982 
1983 	if (test_bit(EV_ABS, dev->evbit))
1984 		for_each_set_bit(i, dev->absbit, ABS_CNT)
1985 			events += input_is_mt_axis(i) ? mt_slots : 1;
1986 
1987 	if (test_bit(EV_REL, dev->evbit))
1988 		events += bitmap_weight(dev->relbit, REL_CNT);
1989 
1990 	/* Make room for KEY and MSC events */
1991 	events += 7;
1992 
1993 	return events;
1994 }
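
/*
 * Worked example: for a hypothetical 10-slot MT touchscreen declaring
 * ABS_MT_SLOT, ABS_MT_POSITION_X/Y and ABS_MT_TRACKING_ID plus single-touch
 * ABS_X/ABS_Y and no EV_REL axes, the estimate above works out to
 *
 *	events = 10 + 1		// per-slot SYN_MT_REPORT + final SYN_REPORT
 *	       + 4 * 10		// four MT axes, one value per slot
 *	       + 2		// ABS_X and ABS_Y
 *	       + 7		// headroom for KEY and MSC events
 *	       = 60
 */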
1995 
1996 #define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
1997 	do {								\
1998 		if (!test_bit(EV_##type, dev->evbit))			\
1999 			memset(dev->bits##bit, 0,			\
2000 				sizeof(dev->bits##bit));		\
2001 	} while (0)
2002 
2003 static void input_cleanse_bitmasks(struct input_dev *dev)
2004 {
2005 	INPUT_CLEANSE_BITMASK(dev, KEY, key);
2006 	INPUT_CLEANSE_BITMASK(dev, REL, rel);
2007 	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
2008 	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
2009 	INPUT_CLEANSE_BITMASK(dev, LED, led);
2010 	INPUT_CLEANSE_BITMASK(dev, SND, snd);
2011 	INPUT_CLEANSE_BITMASK(dev, FF, ff);
2012 	INPUT_CLEANSE_BITMASK(dev, SW, sw);
2013 }
2014 
2015 static void __input_unregister_device(struct input_dev *dev)
2016 {
2017 	struct input_handle *handle, *next;
2018 
2019 	input_disconnect_device(dev);
2020 
2021 	mutex_lock(&input_mutex);
2022 
2023 	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
2024 		handle->handler->disconnect(handle);
2025 	WARN_ON(!list_empty(&dev->h_list));
2026 
2027 	del_timer_sync(&dev->timer);
2028 	list_del_init(&dev->node);
2029 
2030 	input_wakeup_procfs_readers();
2031 
2032 	mutex_unlock(&input_mutex);
2033 
2034 	device_del(&dev->dev);
2035 }
2036 
2037 static void devm_input_device_unregister(struct device *dev, void *res)
2038 {
2039 	struct input_devres *devres = res;
2040 	struct input_dev *input = devres->input;
2041 
2042 	dev_dbg(dev, "%s: unregistering device %s\n",
2043 		__func__, dev_name(&input->dev));
2044 	__input_unregister_device(input);
2045 }
2046 
2047 /**
2048  * input_enable_softrepeat - enable software autorepeat
2049  * @dev: input device
2050  * @delay: repeat delay, in milliseconds
2051  * @period: repeat period, in milliseconds
2052  *
2053  * Enable software autorepeat on the input device.
2054  */
2055 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
2056 {
2057 	dev->timer.function = input_repeat_key;
2058 	dev->rep[REP_DELAY] = delay;
2059 	dev->rep[REP_PERIOD] = period;
2060 }
2061 EXPORT_SYMBOL(input_enable_softrepeat);
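
/*
 * Illustrative sketch: a driver that wants software autorepeat with
 * non-default timing sets the EV_REP bit and picks its own values (the
 * 500 ms delay / 50 ms period below are arbitrary). If dev->rep[] is left
 * at zero, input_register_device() falls back to 250 ms / 33 ms.
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 500, 50);
 */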
2062 
2063 /**
2064  * input_register_device - register device with input core
2065  * @dev: device to be registered
2066  *
2067  * This function registers the device with the input core. The device must
2068  * be allocated with input_allocate_device() and all its capabilities
2069  * set up before registering.
2070  * If the function fails the device must be freed with input_free_device().
2071  * Once the device has been successfully registered it can be unregistered
2072  * with input_unregister_device(); input_free_device() should not be
2073  * called in this case.
2074  *
2075  * Note that this function is also used to register managed input devices
2076  * (ones allocated with devm_input_allocate_device()). Such managed input
2077  * devices need not be explicitly unregistered or freed; their teardown
2078  * is controlled by the devres infrastructure. It is also worth noting
2079  * that teardown of managed input devices is internally a 2-step process:
2080  * a registered managed input device is first unregistered, but stays in
2081  * memory and can still handle input_event() calls (although events will
2082  * not be delivered anywhere). The freeing of a managed input device will
2083  * happen later, when the devres stack is unwound to the point where the
2084  * device allocation was made.
2085  */
2086 int input_register_device(struct input_dev *dev)
2087 {
2088 	struct input_devres *devres = NULL;
2089 	struct input_handler *handler;
2090 	unsigned int packet_size;
2091 	const char *path;
2092 	int error;
2093 
2094 	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
2095 		dev_err(&dev->dev,
2096 			"Absolute device without dev->absinfo, refusing to register\n");
2097 		return -EINVAL;
2098 	}
2099 
2100 	if (dev->devres_managed) {
2101 		devres = devres_alloc(devm_input_device_unregister,
2102 				      sizeof(*devres), GFP_KERNEL);
2103 		if (!devres)
2104 			return -ENOMEM;
2105 
2106 		devres->input = dev;
2107 	}
2108 
2109 	/* Every input device generates EV_SYN/SYN_REPORT events. */
2110 	__set_bit(EV_SYN, dev->evbit);
2111 
2112 	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
2113 	__clear_bit(KEY_RESERVED, dev->keybit);
2114 
2115 	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
2116 	input_cleanse_bitmasks(dev);
2117 
2118 	packet_size = input_estimate_events_per_packet(dev);
2119 	if (dev->hint_events_per_packet < packet_size)
2120 		dev->hint_events_per_packet = packet_size;
2121 
2122 	dev->max_vals = dev->hint_events_per_packet + 2;
2123 	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
2124 	if (!dev->vals) {
2125 		error = -ENOMEM;
2126 		goto err_devres_free;
2127 	}
2128 
2129 	/*
2130 	 * If delay and period are pre-set by the driver, then autorepeating
2131 	 * is handled by the driver itself and we don't do it in input.c.
2132 	 */
2133 	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
2134 		input_enable_softrepeat(dev, 250, 33);
2135 
2136 	if (!dev->getkeycode)
2137 		dev->getkeycode = input_default_getkeycode;
2138 
2139 	if (!dev->setkeycode)
2140 		dev->setkeycode = input_default_setkeycode;
2141 
2142 	error = device_add(&dev->dev);
2143 	if (error)
2144 		goto err_free_vals;
2145 
2146 	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
2147 	pr_info("%s as %s\n",
2148 		dev->name ? dev->name : "Unspecified device",
2149 		path ? path : "N/A");
2150 	kfree(path);
2151 
2152 	error = mutex_lock_interruptible(&input_mutex);
2153 	if (error)
2154 		goto err_device_del;
2155 
2156 	list_add_tail(&dev->node, &input_dev_list);
2157 
2158 	list_for_each_entry(handler, &input_handler_list, node)
2159 		input_attach_handler(dev, handler);
2160 
2161 	input_wakeup_procfs_readers();
2162 
2163 	mutex_unlock(&input_mutex);
2164 
2165 	if (dev->devres_managed) {
2166 		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
2167 			__func__, dev_name(&dev->dev));
2168 		devres_add(dev->dev.parent, devres);
2169 	}
2170 	return 0;
2171 
2172 err_device_del:
2173 	device_del(&dev->dev);
2174 err_free_vals:
2175 	kfree(dev->vals);
2176 	dev->vals = NULL;
2177 err_devres_free:
2178 	devres_free(devres);
2179 	return error;
2180 }
2181 EXPORT_SYMBOL(input_register_device);
2182 
2183 /**
2184  * input_unregister_device - unregister previously registered device
2185  * @dev: device to be unregistered
2186  *
2187  * This function unregisters an input device. Once the device is unregistered
2188  * the caller should not try to access it, as it may be freed at any moment.
2189  */
2190 void input_unregister_device(struct input_dev *dev)
2191 {
2192 	if (dev->devres_managed) {
2193 		WARN_ON(devres_destroy(dev->dev.parent,
2194 					devm_input_device_unregister,
2195 					devm_input_device_match,
2196 					dev));
2197 		__input_unregister_device(dev);
2198 		/*
2199 		 * We do not do input_put_device() here because it will be done
2200 		 * later, when the devm_input_device_release() devres fires.
2201 		 */
2202 	} else {
2203 		__input_unregister_device(dev);
2204 		input_put_device(dev);
2205 	}
2206 }
2207 EXPORT_SYMBOL(input_unregister_device);
2208 
2209 /**
2210  * input_register_handler - register a new input handler
2211  * @handler: handler to be registered
2212  *
2213  * This function registers a new input handler (interface) for input
2214  * devices in the system and attaches it to all input devices that
2215  * are compatible with the handler.
2216  */
2217 int input_register_handler(struct input_handler *handler)
2218 {
2219 	struct input_dev *dev;
2220 	int error;
2221 
2222 	error = mutex_lock_interruptible(&input_mutex);
2223 	if (error)
2224 		return error;
2225 
2226 	INIT_LIST_HEAD(&handler->h_list);
2227 
2228 	list_add_tail(&handler->node, &input_handler_list);
2229 
2230 	list_for_each_entry(dev, &input_dev_list, node)
2231 		input_attach_handler(dev, handler);
2232 
2233 	input_wakeup_procfs_readers();
2234 
2235 	mutex_unlock(&input_mutex);
2236 	return 0;
2237 }
2238 EXPORT_SYMBOL(input_register_handler);
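
/*
 * Illustrative sketch: the skeleton of a handler that wants to see events
 * from every device, loosely modelled on the in-tree evbug handler. The
 * snoop_*() names are placeholders; connect() and disconnect() follow the
 * pattern shown after input_register_handle() below.
 *
 *	static void snoop_event(struct input_handle *handle, unsigned int type,
 *				unsigned int code, int value)
 *	{
 *		pr_debug("%s: type %u code %u value %d\n",
 *			 handle->dev->name, type, code, value);
 *	}
 *
 *	static const struct input_device_id snoop_ids[] = {
 *		{ .driver_info = 1 },	// matches every device
 *		{ },
 *	};
 *
 *	static struct input_handler snoop_handler = {
 *		.event		= snoop_event,
 *		.connect	= snoop_connect,
 *		.disconnect	= snoop_disconnect,
 *		.name		= "snoop",
 *		.id_table	= snoop_ids,
 *	};
 *
 *	static int __init snoop_init(void)
 *	{
 *		return input_register_handler(&snoop_handler);
 *	}
 *
 *	static void __exit snoop_exit(void)
 *	{
 *		input_unregister_handler(&snoop_handler);
 *	}
 */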
2239 
2240 /**
2241  * input_unregister_handler - unregisters an input handler
2242  * @handler: handler to be unregistered
2243  *
2244  * This function disconnects a handler from its input devices and
2245  * removes it from lists of known handlers.
2246  */
2247 void input_unregister_handler(struct input_handler *handler)
2248 {
2249 	struct input_handle *handle, *next;
2250 
2251 	mutex_lock(&input_mutex);
2252 
2253 	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
2254 		handler->disconnect(handle);
2255 	WARN_ON(!list_empty(&handler->h_list));
2256 
2257 	list_del_init(&handler->node);
2258 
2259 	input_wakeup_procfs_readers();
2260 
2261 	mutex_unlock(&input_mutex);
2262 }
2263 EXPORT_SYMBOL(input_unregister_handler);
2264 
2265 /**
2266  * input_handler_for_each_handle - handle iterator
2267  * @handler: input handler to iterate
2268  * @data: data for the callback
2269  * @fn: function to be called for each handle
2270  *
2271  * Iterate over @handler's list of handles, and call @fn for each, passing
2272  * it @data, and stop when @fn returns a non-zero value. The function uses
2273  * RCU to traverse the list and therefore may be used in atomic
2274  * contexts. The @fn callback is invoked from an RCU critical section and
2275  * thus must not sleep.
2276  */
2277 int input_handler_for_each_handle(struct input_handler *handler, void *data,
2278 				  int (*fn)(struct input_handle *, void *))
2279 {
2280 	struct input_handle *handle;
2281 	int retval = 0;
2282 
2283 	rcu_read_lock();
2284 
2285 	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
2286 		retval = fn(handle, data);
2287 		if (retval)
2288 			break;
2289 	}
2290 
2291 	rcu_read_unlock();
2292 
2293 	return retval;
2294 }
2295 EXPORT_SYMBOL(input_handler_for_each_handle);
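
/*
 * Illustrative sketch of a callback: check whether the handler already has
 * a handle attached to a particular input device. Returning non-zero stops
 * the iteration early; the callback runs under rcu_read_lock() and must not
 * sleep.
 *
 *	static int snoop_match_dev(struct input_handle *handle, void *data)
 *	{
 *		return handle->dev == data;
 *	}
 *
 *	// ...
 *	if (input_handler_for_each_handle(&snoop_handler, dev, snoop_match_dev))
 *		pr_debug("already attached to %s\n", dev_name(&dev->dev));
 */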
2296 
2297 /**
2298  * input_register_handle - register a new input handle
2299  * @handle: handle to register
2300  *
2301  * This function puts a new input handle onto device's
2302  * and handler's lists so that events can flow through
2303  * it once it is opened using input_open_device().
2304  *
2305  * This function is supposed to be called from handler's
2306  * connect() method.
2307  */
2308 int input_register_handle(struct input_handle *handle)
2309 {
2310 	struct input_handler *handler = handle->handler;
2311 	struct input_dev *dev = handle->dev;
2312 	int error;
2313 
2314 	/*
2315 	 * We take dev->mutex here to prevent race with
2316 	 * input_release_device().
2317 	 */
2318 	error = mutex_lock_interruptible(&dev->mutex);
2319 	if (error)
2320 		return error;
2321 
2322 	/*
2323 	 * Filters go to the head of the list, normal handlers
2324 	 * to the tail.
2325 	 */
2326 	if (handler->filter)
2327 		list_add_rcu(&handle->d_node, &dev->h_list);
2328 	else
2329 		list_add_tail_rcu(&handle->d_node, &dev->h_list);
2330 
2331 	mutex_unlock(&dev->mutex);
2332 
2333 	/*
2334 	 * Since we are supposed to be called from ->connect()
2335 	 * which is mutually exclusive with ->disconnect()
2336 	 * we can't be racing with input_unregister_handle()
2337 	 * and so separate lock is not needed here.
2338 	 */
2339 	list_add_tail_rcu(&handle->h_node, &handler->h_list);
2340 
2341 	if (handler->start)
2342 		handler->start(handle);
2343 
2344 	return 0;
2345 }
2346 EXPORT_SYMBOL(input_register_handle);
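
/*
 * Illustrative sketch of the matching connect()/disconnect() pair for the
 * hypothetical handler above: connect() creates and registers a handle and
 * opens the device, disconnect() unwinds in reverse order. This mirrors the
 * pattern used by simple in-tree handlers such as evbug.
 *
 *	static int snoop_connect(struct input_handler *handler,
 *				 struct input_dev *dev,
 *				 const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "snoop";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	 err_unregister_handle:
 *		input_unregister_handle(handle);
 *	 err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 *
 *	static void snoop_disconnect(struct input_handle *handle)
 *	{
 *		input_close_device(handle);
 *		input_unregister_handle(handle);
 *		kfree(handle);
 *	}
 */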
2347 
2348 /**
2349  * input_unregister_handle - unregister an input handle
2350  * @handle: handle to unregister
2351  *
2352  * This function removes input handle from device's
2353  * and handler's lists.
2354  *
2355  * This function is supposed to be called from handler's
2356  * disconnect() method.
2357  */
2358 void input_unregister_handle(struct input_handle *handle)
2359 {
2360 	struct input_dev *dev = handle->dev;
2361 
2362 	list_del_rcu(&handle->h_node);
2363 
2364 	/*
2365 	 * Take dev->mutex to prevent race with input_release_device().
2366 	 */
2367 	mutex_lock(&dev->mutex);
2368 	list_del_rcu(&handle->d_node);
2369 	mutex_unlock(&dev->mutex);
2370 
2371 	synchronize_rcu();
2372 }
2373 EXPORT_SYMBOL(input_unregister_handle);
2374 
2375 /**
2376  * input_get_new_minor - allocates a new input minor number
2377  * @legacy_base: beginning or the legacy range to be searched
2378  * @legacy_num: size of legacy range
2379  * @allow_dynamic: whether we can also take ID from the dynamic range
2380  *
2381  * This function allocates a new device minor for from input major namespace.
2382  * Caller can request legacy minor by specifying @legacy_base and @legacy_num
2383  * parameters and whether ID can be allocated from dynamic range if there are
2384  * no free IDs in legacy range.
2385  */
2386 int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2387 			bool allow_dynamic)
2388 {
2389 	/*
2390 	 * This function should be called from input handler's ->connect()
2391 	 * methods, which are serialized with input_mutex, so no additional
2392 	 * locking is needed here.
2393 	 */
2394 	if (legacy_base >= 0) {
2395 		int minor = ida_simple_get(&input_ida,
2396 					   legacy_base,
2397 					   legacy_base + legacy_num,
2398 					   GFP_KERNEL);
2399 		if (minor >= 0 || !allow_dynamic)
2400 			return minor;
2401 	}
2402 
2403 	return ida_simple_get(&input_ida,
2404 			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
2405 			      GFP_KERNEL);
2406 }
2407 EXPORT_SYMBOL(input_get_new_minor);
2408 
2409 /**
2410  * input_free_minor - release previously allocated minor
2411  * @minor: minor to be released
2412  *
2413  * This function releases previously allocated input minor so that it can be
2414  * reused later.
2415  */
2416 void input_free_minor(unsigned int minor)
2417 {
2418 	ida_simple_remove(&input_ida, minor);
2419 }
2420 EXPORT_SYMBOL(input_free_minor);
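
/*
 * Illustrative sketch: a character-device handler's connect() path typically
 * tries its legacy minor window first and falls back to the dynamic range;
 * the base/size values below are placeholders rather than any real handler's
 * reservation.
 *
 *	int minor;
 *
 *	minor = input_get_new_minor(64, 32, true);
 *	if (minor < 0)
 *		return minor;
 *	// ... set up the character device for MKDEV(INPUT_MAJOR, minor) ...
 *
 *	// and on the teardown path:
 *	input_free_minor(minor);
 */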
2421 
2422 static int __init input_init(void)
2423 {
2424 	int err;
2425 
2426 	err = class_register(&input_class);
2427 	if (err) {
2428 		pr_err("unable to register input_dev class\n");
2429 		return err;
2430 	}
2431 
2432 	err = input_proc_init();
2433 	if (err)
2434 		goto fail1;
2435 
2436 	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2437 				     INPUT_MAX_CHAR_DEVICES, "input");
2438 	if (err) {
2439 		pr_err("unable to register char major %d\n", INPUT_MAJOR);
2440 		goto fail2;
2441 	}
2442 
2443 	return 0;
2444 
2445  fail2:	input_proc_exit();
2446  fail1:	class_unregister(&input_class);
2447 	return err;
2448 }
2449 
2450 static void __exit input_exit(void)
2451 {
2452 	input_proc_exit();
2453 	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2454 				 INPUT_MAX_CHAR_DEVICES);
2455 	class_unregister(&input_class);
2456 }
2457 
2458 subsys_initcall(input_init);
2459 module_exit(input_exit);
2460