xref: /linux/drivers/hid/hid-core.c (revision 0951fede4e5215e4529a3b711ac94fc84f90eca8)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  HID support for Linux
4  *
5  *  Copyright (c) 1999 Andreas Gal
6  *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7  *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8  *  Copyright (c) 2006-2012 Jiri Kosina
9  */
10 
11 /*
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30 
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35 
36 #include "hid-ids.h"
37 
38 /*
39  * Version Information
40  */
41 
42 #define DRIVER_DESC "HID core driver"
43 
44 static int hid_ignore_special_drivers = 0;
45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
47 
48 /*
49  * Register a new report for a device.
50  */
51 
hid_register_report(struct hid_device * device,enum hid_report_type type,unsigned int id,unsigned int application)52 struct hid_report *hid_register_report(struct hid_device *device,
53 				       enum hid_report_type type, unsigned int id,
54 				       unsigned int application)
55 {
56 	struct hid_report_enum *report_enum = device->report_enum + type;
57 	struct hid_report *report;
58 
59 	if (id >= HID_MAX_IDS)
60 		return NULL;
61 	if (report_enum->report_id_hash[id])
62 		return report_enum->report_id_hash[id];
63 
64 	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
65 	if (!report)
66 		return NULL;
67 
68 	if (id != 0)
69 		report_enum->numbered = 1;
70 
71 	report->id = id;
72 	report->type = type;
73 	report->size = 0;
74 	report->device = device;
75 	report->application = application;
76 	report_enum->report_id_hash[id] = report;
77 
78 	list_add_tail(&report->list, &report_enum->report_list);
79 	INIT_LIST_HEAD(&report->field_entry_list);
80 
81 	return report;
82 }
83 EXPORT_SYMBOL_GPL(hid_register_report);
84 
85 /*
86  * Register a new field for this report.
87  */
88 
hid_register_field(struct hid_report * report,unsigned usages)89 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
90 {
91 	struct hid_field *field;
92 
93 	if (report->maxfield == HID_MAX_FIELDS) {
94 		hid_err(report->device, "too many fields in report\n");
95 		return NULL;
96 	}
97 
98 	field = kvzalloc((sizeof(struct hid_field) +
99 			  usages * sizeof(struct hid_usage) +
100 			  3 * usages * sizeof(unsigned int)), GFP_KERNEL);
101 	if (!field)
102 		return NULL;
103 
104 	field->index = report->maxfield++;
105 	report->field[field->index] = field;
106 	field->usage = (struct hid_usage *)(field + 1);
107 	field->value = (s32 *)(field->usage + usages);
108 	field->new_value = (s32 *)(field->value + usages);
109 	field->usages_priorities = (s32 *)(field->new_value + usages);
110 	field->report = report;
111 
112 	return field;
113 }
114 
115 /*
116  * Open a collection. The type/usage is pushed on the stack.
117  */
118 
open_collection(struct hid_parser * parser,unsigned type)119 static int open_collection(struct hid_parser *parser, unsigned type)
120 {
121 	struct hid_collection *collection;
122 	unsigned usage;
123 	int collection_index;
124 
125 	usage = parser->local.usage[0];
126 
127 	if (parser->collection_stack_ptr == parser->collection_stack_size) {
128 		unsigned int *collection_stack;
129 		unsigned int new_size = parser->collection_stack_size +
130 					HID_COLLECTION_STACK_SIZE;
131 
132 		collection_stack = krealloc(parser->collection_stack,
133 					    new_size * sizeof(unsigned int),
134 					    GFP_KERNEL);
135 		if (!collection_stack)
136 			return -ENOMEM;
137 
138 		parser->collection_stack = collection_stack;
139 		parser->collection_stack_size = new_size;
140 	}
141 
142 	if (parser->device->maxcollection == parser->device->collection_size) {
143 		collection = kmalloc(
144 				array3_size(sizeof(struct hid_collection),
145 					    parser->device->collection_size,
146 					    2),
147 				GFP_KERNEL);
148 		if (collection == NULL) {
149 			hid_err(parser->device, "failed to reallocate collection array\n");
150 			return -ENOMEM;
151 		}
152 		memcpy(collection, parser->device->collection,
153 			sizeof(struct hid_collection) *
154 			parser->device->collection_size);
155 		memset(collection + parser->device->collection_size, 0,
156 			sizeof(struct hid_collection) *
157 			parser->device->collection_size);
158 		kfree(parser->device->collection);
159 		parser->device->collection = collection;
160 		parser->device->collection_size *= 2;
161 	}
162 
163 	parser->collection_stack[parser->collection_stack_ptr++] =
164 		parser->device->maxcollection;
165 
166 	collection_index = parser->device->maxcollection++;
167 	collection = parser->device->collection + collection_index;
168 	collection->type = type;
169 	collection->usage = usage;
170 	collection->level = parser->collection_stack_ptr - 1;
171 	collection->parent_idx = (collection->level == 0) ? -1 :
172 		parser->collection_stack[collection->level - 1];
173 
174 	if (type == HID_COLLECTION_APPLICATION)
175 		parser->device->maxapplication++;
176 
177 	return 0;
178 }
179 
180 /*
181  * Close a collection.
182  */
183 
close_collection(struct hid_parser * parser)184 static int close_collection(struct hid_parser *parser)
185 {
186 	if (!parser->collection_stack_ptr) {
187 		hid_err(parser->device, "collection stack underflow\n");
188 		return -EINVAL;
189 	}
190 	parser->collection_stack_ptr--;
191 	return 0;
192 }
193 
194 /*
195  * Climb up the stack, search for the specified collection type
196  * and return the usage.
197  */
198 
hid_lookup_collection(struct hid_parser * parser,unsigned type)199 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
200 {
201 	struct hid_collection *collection = parser->device->collection;
202 	int n;
203 
204 	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
205 		unsigned index = parser->collection_stack[n];
206 		if (collection[index].type == type)
207 			return collection[index].usage;
208 	}
209 	return 0; /* we know nothing about this usage type */
210 }
211 
212 /*
213  * Concatenate usage which defines 16 bits or less with the
214  * currently defined usage page to form a 32 bit usage
215  */
216 
complete_usage(struct hid_parser * parser,unsigned int index)217 static void complete_usage(struct hid_parser *parser, unsigned int index)
218 {
219 	parser->local.usage[index] &= 0xFFFF;
220 	parser->local.usage[index] |=
221 		(parser->global.usage_page & 0xFFFF) << 16;
222 }
223 
224 /*
225  * Add a usage to the temporary parser table.
226  */
227 
hid_add_usage(struct hid_parser * parser,unsigned usage,u8 size)228 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
229 {
230 	if (parser->local.usage_index >= HID_MAX_USAGES) {
231 		hid_err(parser->device, "usage index exceeded\n");
232 		return -1;
233 	}
234 	parser->local.usage[parser->local.usage_index] = usage;
235 
236 	/*
237 	 * If Usage item only includes usage id, concatenate it with
238 	 * currently defined usage page
239 	 */
240 	if (size <= 2)
241 		complete_usage(parser, parser->local.usage_index);
242 
243 	parser->local.usage_size[parser->local.usage_index] = size;
244 	parser->local.collection_index[parser->local.usage_index] =
245 		parser->collection_stack_ptr ?
246 		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
247 	parser->local.usage_index++;
248 	return 0;
249 }
250 
251 /*
252  * Register a new field for this report.
253  */
254 
hid_add_field(struct hid_parser * parser,unsigned report_type,unsigned flags)255 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
256 {
257 	struct hid_report *report;
258 	struct hid_field *field;
259 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
260 	unsigned int usages;
261 	unsigned int offset;
262 	unsigned int i;
263 	unsigned int application;
264 
265 	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
266 
267 	report = hid_register_report(parser->device, report_type,
268 				     parser->global.report_id, application);
269 	if (!report) {
270 		hid_err(parser->device, "hid_register_report failed\n");
271 		return -1;
272 	}
273 
274 	/* Handle both signed and unsigned cases properly */
275 	if ((parser->global.logical_minimum < 0 &&
276 		parser->global.logical_maximum <
277 		parser->global.logical_minimum) ||
278 		(parser->global.logical_minimum >= 0 &&
279 		(__u32)parser->global.logical_maximum <
280 		(__u32)parser->global.logical_minimum)) {
281 		dbg_hid("logical range invalid 0x%x 0x%x\n",
282 			parser->global.logical_minimum,
283 			parser->global.logical_maximum);
284 		return -1;
285 	}
286 
287 	offset = report->size;
288 	report->size += parser->global.report_size * parser->global.report_count;
289 
290 	if (parser->device->ll_driver->max_buffer_size)
291 		max_buffer_size = parser->device->ll_driver->max_buffer_size;
292 
293 	/* Total size check: Allow for possible report index byte */
294 	if (report->size > (max_buffer_size - 1) << 3) {
295 		hid_err(parser->device, "report is too long\n");
296 		return -1;
297 	}
298 
299 	if (!parser->local.usage_index) /* Ignore padding fields */
300 		return 0;
301 
302 	usages = max_t(unsigned, parser->local.usage_index,
303 				 parser->global.report_count);
304 
305 	field = hid_register_field(report, usages);
306 	if (!field)
307 		return 0;
308 
309 	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
310 	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
311 	field->application = application;
312 
313 	for (i = 0; i < usages; i++) {
314 		unsigned j = i;
315 		/* Duplicate the last usage we parsed if we have excess values */
316 		if (i >= parser->local.usage_index)
317 			j = parser->local.usage_index - 1;
318 		field->usage[i].hid = parser->local.usage[j];
319 		field->usage[i].collection_index =
320 			parser->local.collection_index[j];
321 		field->usage[i].usage_index = i;
322 		field->usage[i].resolution_multiplier = 1;
323 	}
324 
325 	field->maxusage = usages;
326 	field->flags = flags;
327 	field->report_offset = offset;
328 	field->report_type = report_type;
329 	field->report_size = parser->global.report_size;
330 	field->report_count = parser->global.report_count;
331 	field->logical_minimum = parser->global.logical_minimum;
332 	field->logical_maximum = parser->global.logical_maximum;
333 	field->physical_minimum = parser->global.physical_minimum;
334 	field->physical_maximum = parser->global.physical_maximum;
335 	field->unit_exponent = parser->global.unit_exponent;
336 	field->unit = parser->global.unit;
337 
338 	return 0;
339 }
340 
341 /*
342  * Read data value from item.
343  */
344 
item_udata(struct hid_item * item)345 static u32 item_udata(struct hid_item *item)
346 {
347 	switch (item->size) {
348 	case 1: return item->data.u8;
349 	case 2: return item->data.u16;
350 	case 4: return item->data.u32;
351 	}
352 	return 0;
353 }
354 
item_sdata(struct hid_item * item)355 static s32 item_sdata(struct hid_item *item)
356 {
357 	switch (item->size) {
358 	case 1: return item->data.s8;
359 	case 2: return item->data.s16;
360 	case 4: return item->data.s32;
361 	}
362 	return 0;
363 }
364 
365 /*
366  * Process a global item.
367  */
368 
hid_parser_global(struct hid_parser * parser,struct hid_item * item)369 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
370 {
371 	__s32 raw_value;
372 	switch (item->tag) {
373 	case HID_GLOBAL_ITEM_TAG_PUSH:
374 
375 		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
376 			hid_err(parser->device, "global environment stack overflow\n");
377 			return -1;
378 		}
379 
380 		memcpy(parser->global_stack + parser->global_stack_ptr++,
381 			&parser->global, sizeof(struct hid_global));
382 		return 0;
383 
384 	case HID_GLOBAL_ITEM_TAG_POP:
385 
386 		if (!parser->global_stack_ptr) {
387 			hid_err(parser->device, "global environment stack underflow\n");
388 			return -1;
389 		}
390 
391 		memcpy(&parser->global, parser->global_stack +
392 			--parser->global_stack_ptr, sizeof(struct hid_global));
393 		return 0;
394 
395 	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
396 		parser->global.usage_page = item_udata(item);
397 		return 0;
398 
399 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
400 		parser->global.logical_minimum = item_sdata(item);
401 		return 0;
402 
403 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
404 		if (parser->global.logical_minimum < 0)
405 			parser->global.logical_maximum = item_sdata(item);
406 		else
407 			parser->global.logical_maximum = item_udata(item);
408 		return 0;
409 
410 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
411 		parser->global.physical_minimum = item_sdata(item);
412 		return 0;
413 
414 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
415 		if (parser->global.physical_minimum < 0)
416 			parser->global.physical_maximum = item_sdata(item);
417 		else
418 			parser->global.physical_maximum = item_udata(item);
419 		return 0;
420 
421 	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
422 		/* Many devices provide unit exponent as a two's complement
423 		 * nibble due to the common misunderstanding of HID
424 		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
425 		 * both this and the standard encoding. */
426 		raw_value = item_sdata(item);
427 		if (!(raw_value & 0xfffffff0))
428 			parser->global.unit_exponent = hid_snto32(raw_value, 4);
429 		else
430 			parser->global.unit_exponent = raw_value;
431 		return 0;
432 
433 	case HID_GLOBAL_ITEM_TAG_UNIT:
434 		parser->global.unit = item_udata(item);
435 		return 0;
436 
437 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
438 		parser->global.report_size = item_udata(item);
439 		if (parser->global.report_size > 256) {
440 			hid_err(parser->device, "invalid report_size %d\n",
441 					parser->global.report_size);
442 			return -1;
443 		}
444 		return 0;
445 
446 	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
447 		parser->global.report_count = item_udata(item);
448 		if (parser->global.report_count > HID_MAX_USAGES) {
449 			hid_err(parser->device, "invalid report_count %d\n",
450 					parser->global.report_count);
451 			return -1;
452 		}
453 		return 0;
454 
455 	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
456 		parser->global.report_id = item_udata(item);
457 		if (parser->global.report_id == 0 ||
458 		    parser->global.report_id >= HID_MAX_IDS) {
459 			hid_err(parser->device, "report_id %u is invalid\n",
460 				parser->global.report_id);
461 			return -1;
462 		}
463 		return 0;
464 
465 	default:
466 		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
467 		return -1;
468 	}
469 }
470 
471 /*
472  * Process a local item.
473  */
474 
hid_parser_local(struct hid_parser * parser,struct hid_item * item)475 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
476 {
477 	__u32 data;
478 	unsigned n;
479 	__u32 count;
480 
481 	data = item_udata(item);
482 
483 	switch (item->tag) {
484 	case HID_LOCAL_ITEM_TAG_DELIMITER:
485 
486 		if (data) {
487 			/*
488 			 * We treat items before the first delimiter
489 			 * as global to all usage sets (branch 0).
490 			 * In the moment we process only these global
491 			 * items and the first delimiter set.
492 			 */
493 			if (parser->local.delimiter_depth != 0) {
494 				hid_err(parser->device, "nested delimiters\n");
495 				return -1;
496 			}
497 			parser->local.delimiter_depth++;
498 			parser->local.delimiter_branch++;
499 		} else {
500 			if (parser->local.delimiter_depth < 1) {
501 				hid_err(parser->device, "bogus close delimiter\n");
502 				return -1;
503 			}
504 			parser->local.delimiter_depth--;
505 		}
506 		return 0;
507 
508 	case HID_LOCAL_ITEM_TAG_USAGE:
509 
510 		if (parser->local.delimiter_branch > 1) {
511 			dbg_hid("alternative usage ignored\n");
512 			return 0;
513 		}
514 
515 		return hid_add_usage(parser, data, item->size);
516 
517 	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
518 
519 		if (parser->local.delimiter_branch > 1) {
520 			dbg_hid("alternative usage ignored\n");
521 			return 0;
522 		}
523 
524 		parser->local.usage_minimum = data;
525 		return 0;
526 
527 	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
528 
529 		if (parser->local.delimiter_branch > 1) {
530 			dbg_hid("alternative usage ignored\n");
531 			return 0;
532 		}
533 
534 		count = data - parser->local.usage_minimum;
535 		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
536 			/*
537 			 * We do not warn if the name is not set, we are
538 			 * actually pre-scanning the device.
539 			 */
540 			if (dev_name(&parser->device->dev))
541 				hid_warn(parser->device,
542 					 "ignoring exceeding usage max\n");
543 			data = HID_MAX_USAGES - parser->local.usage_index +
544 				parser->local.usage_minimum - 1;
545 			if (data <= 0) {
546 				hid_err(parser->device,
547 					"no more usage index available\n");
548 				return -1;
549 			}
550 		}
551 
552 		for (n = parser->local.usage_minimum; n <= data; n++)
553 			if (hid_add_usage(parser, n, item->size)) {
554 				dbg_hid("hid_add_usage failed\n");
555 				return -1;
556 			}
557 		return 0;
558 
559 	default:
560 
561 		dbg_hid("unknown local item tag 0x%x\n", item->tag);
562 		return 0;
563 	}
564 	return 0;
565 }
566 
567 /*
568  * Concatenate Usage Pages into Usages where relevant:
569  * As per specification, 6.2.2.8: "When the parser encounters a main item it
570  * concatenates the last declared Usage Page with a Usage to form a complete
571  * usage value."
572  */
573 
hid_concatenate_last_usage_page(struct hid_parser * parser)574 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
575 {
576 	int i;
577 	unsigned int usage_page;
578 	unsigned int current_page;
579 
580 	if (!parser->local.usage_index)
581 		return;
582 
583 	usage_page = parser->global.usage_page;
584 
585 	/*
586 	 * Concatenate usage page again only if last declared Usage Page
587 	 * has not been already used in previous usages concatenation
588 	 */
589 	for (i = parser->local.usage_index - 1; i >= 0; i--) {
590 		if (parser->local.usage_size[i] > 2)
591 			/* Ignore extended usages */
592 			continue;
593 
594 		current_page = parser->local.usage[i] >> 16;
595 		if (current_page == usage_page)
596 			break;
597 
598 		complete_usage(parser, i);
599 	}
600 }
601 
602 /*
603  * Process a main item.
604  */
605 
hid_parser_main(struct hid_parser * parser,struct hid_item * item)606 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
607 {
608 	__u32 data;
609 	int ret;
610 
611 	hid_concatenate_last_usage_page(parser);
612 
613 	data = item_udata(item);
614 
615 	switch (item->tag) {
616 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
617 		ret = open_collection(parser, data & 0xff);
618 		break;
619 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
620 		ret = close_collection(parser);
621 		break;
622 	case HID_MAIN_ITEM_TAG_INPUT:
623 		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
624 		break;
625 	case HID_MAIN_ITEM_TAG_OUTPUT:
626 		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
627 		break;
628 	case HID_MAIN_ITEM_TAG_FEATURE:
629 		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
630 		break;
631 	default:
632 		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
633 		ret = 0;
634 	}
635 
636 	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */
637 
638 	return ret;
639 }
640 
641 /*
642  * Process a reserved item.
643  */
644 
hid_parser_reserved(struct hid_parser * parser,struct hid_item * item)645 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
646 {
647 	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
648 	return 0;
649 }
650 
651 /*
652  * Free a report and all registered fields. The field->usage and
653  * field->value table's are allocated behind the field, so we need
654  * only to free(field) itself.
655  */
656 
hid_free_report(struct hid_report * report)657 static void hid_free_report(struct hid_report *report)
658 {
659 	unsigned n;
660 
661 	kfree(report->field_entries);
662 
663 	for (n = 0; n < report->maxfield; n++)
664 		kvfree(report->field[n]);
665 	kfree(report);
666 }
667 
668 /*
669  * Close report. This function returns the device
670  * state to the point prior to hid_open_report().
671  */
hid_close_report(struct hid_device * device)672 static void hid_close_report(struct hid_device *device)
673 {
674 	unsigned i, j;
675 
676 	for (i = 0; i < HID_REPORT_TYPES; i++) {
677 		struct hid_report_enum *report_enum = device->report_enum + i;
678 
679 		for (j = 0; j < HID_MAX_IDS; j++) {
680 			struct hid_report *report = report_enum->report_id_hash[j];
681 			if (report)
682 				hid_free_report(report);
683 		}
684 		memset(report_enum, 0, sizeof(*report_enum));
685 		INIT_LIST_HEAD(&report_enum->report_list);
686 	}
687 
688 	kfree(device->rdesc);
689 	device->rdesc = NULL;
690 	device->rsize = 0;
691 
692 	kfree(device->collection);
693 	device->collection = NULL;
694 	device->collection_size = 0;
695 	device->maxcollection = 0;
696 	device->maxapplication = 0;
697 
698 	device->status &= ~HID_STAT_PARSED;
699 }
700 
701 /*
702  * Free a device structure, all reports, and all fields.
703  */
704 
hiddev_free(struct kref * ref)705 void hiddev_free(struct kref *ref)
706 {
707 	struct hid_device *hid = container_of(ref, struct hid_device, ref);
708 
709 	hid_close_report(hid);
710 	kfree(hid->dev_rdesc);
711 	kfree(hid);
712 }
713 
hid_device_release(struct device * dev)714 static void hid_device_release(struct device *dev)
715 {
716 	struct hid_device *hid = to_hid_device(dev);
717 
718 	kref_put(&hid->ref, hiddev_free);
719 }
720 
721 /*
722  * Fetch a report description item from the data stream. We support long
723  * items, though they are not used yet.
724  */
725 
fetch_item(const __u8 * start,const __u8 * end,struct hid_item * item)726 static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
727 {
728 	u8 b;
729 
730 	if ((end - start) <= 0)
731 		return NULL;
732 
733 	b = *start++;
734 
735 	item->type = (b >> 2) & 3;
736 	item->tag  = (b >> 4) & 15;
737 
738 	if (item->tag == HID_ITEM_TAG_LONG) {
739 
740 		item->format = HID_ITEM_FORMAT_LONG;
741 
742 		if ((end - start) < 2)
743 			return NULL;
744 
745 		item->size = *start++;
746 		item->tag  = *start++;
747 
748 		if ((end - start) < item->size)
749 			return NULL;
750 
751 		item->data.longdata = start;
752 		start += item->size;
753 		return start;
754 	}
755 
756 	item->format = HID_ITEM_FORMAT_SHORT;
757 	item->size = b & 3;
758 
759 	switch (item->size) {
760 	case 0:
761 		return start;
762 
763 	case 1:
764 		if ((end - start) < 1)
765 			return NULL;
766 		item->data.u8 = *start++;
767 		return start;
768 
769 	case 2:
770 		if ((end - start) < 2)
771 			return NULL;
772 		item->data.u16 = get_unaligned_le16(start);
773 		start = (__u8 *)((__le16 *)start + 1);
774 		return start;
775 
776 	case 3:
777 		item->size++;
778 		if ((end - start) < 4)
779 			return NULL;
780 		item->data.u32 = get_unaligned_le32(start);
781 		start = (__u8 *)((__le32 *)start + 1);
782 		return start;
783 	}
784 
785 	return NULL;
786 }
787 
hid_scan_input_usage(struct hid_parser * parser,u32 usage)788 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
789 {
790 	struct hid_device *hid = parser->device;
791 
792 	if (usage == HID_DG_CONTACTID)
793 		hid->group = HID_GROUP_MULTITOUCH;
794 }
795 
hid_scan_feature_usage(struct hid_parser * parser,u32 usage)796 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
797 {
798 	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
799 	    parser->global.report_size == 8)
800 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
801 
802 	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
803 	    parser->global.report_size == 8)
804 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
805 }
806 
hid_scan_collection(struct hid_parser * parser,unsigned type)807 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
808 {
809 	struct hid_device *hid = parser->device;
810 	int i;
811 
812 	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
813 	    (type == HID_COLLECTION_PHYSICAL ||
814 	     type == HID_COLLECTION_APPLICATION))
815 		hid->group = HID_GROUP_SENSOR_HUB;
816 
817 	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
818 	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
819 	    hid->group == HID_GROUP_MULTITOUCH)
820 		hid->group = HID_GROUP_GENERIC;
821 
822 	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
823 		for (i = 0; i < parser->local.usage_index; i++)
824 			if (parser->local.usage[i] == HID_GD_POINTER)
825 				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
826 
827 	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
828 		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
829 
830 	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
831 		for (i = 0; i < parser->local.usage_index; i++)
832 			if (parser->local.usage[i] ==
833 					(HID_UP_GOOGLEVENDOR | 0x0001))
834 				parser->device->group =
835 					HID_GROUP_VIVALDI;
836 }
837 
hid_scan_main(struct hid_parser * parser,struct hid_item * item)838 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
839 {
840 	__u32 data;
841 	int i;
842 
843 	hid_concatenate_last_usage_page(parser);
844 
845 	data = item_udata(item);
846 
847 	switch (item->tag) {
848 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
849 		hid_scan_collection(parser, data & 0xff);
850 		break;
851 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
852 		break;
853 	case HID_MAIN_ITEM_TAG_INPUT:
854 		/* ignore constant inputs, they will be ignored by hid-input */
855 		if (data & HID_MAIN_ITEM_CONSTANT)
856 			break;
857 		for (i = 0; i < parser->local.usage_index; i++)
858 			hid_scan_input_usage(parser, parser->local.usage[i]);
859 		break;
860 	case HID_MAIN_ITEM_TAG_OUTPUT:
861 		break;
862 	case HID_MAIN_ITEM_TAG_FEATURE:
863 		for (i = 0; i < parser->local.usage_index; i++)
864 			hid_scan_feature_usage(parser, parser->local.usage[i]);
865 		break;
866 	}
867 
868 	/* Reset the local parser environment */
869 	memset(&parser->local, 0, sizeof(parser->local));
870 
871 	return 0;
872 }
873 
874 /*
875  * Scan a report descriptor before the device is added to the bus.
876  * Sets device groups and other properties that determine what driver
877  * to load.
878  */
hid_scan_report(struct hid_device * hid)879 static int hid_scan_report(struct hid_device *hid)
880 {
881 	struct hid_parser *parser;
882 	struct hid_item item;
883 	const __u8 *start = hid->dev_rdesc;
884 	const __u8 *end = start + hid->dev_rsize;
885 	static int (*dispatch_type[])(struct hid_parser *parser,
886 				      struct hid_item *item) = {
887 		hid_scan_main,
888 		hid_parser_global,
889 		hid_parser_local,
890 		hid_parser_reserved
891 	};
892 
893 	parser = vzalloc(sizeof(struct hid_parser));
894 	if (!parser)
895 		return -ENOMEM;
896 
897 	parser->device = hid;
898 	hid->group = HID_GROUP_GENERIC;
899 
900 	/*
901 	 * The parsing is simpler than the one in hid_open_report() as we should
902 	 * be robust against hid errors. Those errors will be raised by
903 	 * hid_open_report() anyway.
904 	 */
905 	while ((start = fetch_item(start, end, &item)) != NULL)
906 		dispatch_type[item.type](parser, &item);
907 
908 	/*
909 	 * Handle special flags set during scanning.
910 	 */
911 	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
912 	    (hid->group == HID_GROUP_MULTITOUCH))
913 		hid->group = HID_GROUP_MULTITOUCH_WIN_8;
914 
915 	/*
916 	 * Vendor specific handlings
917 	 */
918 	switch (hid->vendor) {
919 	case USB_VENDOR_ID_WACOM:
920 		hid->group = HID_GROUP_WACOM;
921 		break;
922 	case USB_VENDOR_ID_SYNAPTICS:
923 		if (hid->group == HID_GROUP_GENERIC)
924 			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
925 			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
926 				/*
927 				 * hid-rmi should take care of them,
928 				 * not hid-generic
929 				 */
930 				hid->group = HID_GROUP_RMI;
931 		break;
932 	}
933 
934 	kfree(parser->collection_stack);
935 	vfree(parser);
936 	return 0;
937 }
938 
939 /**
940  * hid_parse_report - parse device report
941  *
942  * @hid: hid device
943  * @start: report start
944  * @size: report size
945  *
946  * Allocate the device report as read by the bus driver. This function should
947  * only be called from parse() in ll drivers.
948  */
hid_parse_report(struct hid_device * hid,const __u8 * start,unsigned size)949 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
950 {
951 	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
952 	if (!hid->dev_rdesc)
953 		return -ENOMEM;
954 	hid->dev_rsize = size;
955 	return 0;
956 }
957 EXPORT_SYMBOL_GPL(hid_parse_report);
958 
959 static const char * const hid_report_names[] = {
960 	"HID_INPUT_REPORT",
961 	"HID_OUTPUT_REPORT",
962 	"HID_FEATURE_REPORT",
963 };
964 /**
965  * hid_validate_values - validate existing device report's value indexes
966  *
967  * @hid: hid device
968  * @type: which report type to examine
969  * @id: which report ID to examine (0 for first)
970  * @field_index: which report field to examine
971  * @report_counts: expected number of values
972  *
973  * Validate the number of values in a given field of a given report, after
974  * parsing.
975  */
hid_validate_values(struct hid_device * hid,enum hid_report_type type,unsigned int id,unsigned int field_index,unsigned int report_counts)976 struct hid_report *hid_validate_values(struct hid_device *hid,
977 				       enum hid_report_type type, unsigned int id,
978 				       unsigned int field_index,
979 				       unsigned int report_counts)
980 {
981 	struct hid_report *report;
982 
983 	if (type > HID_FEATURE_REPORT) {
984 		hid_err(hid, "invalid HID report type %u\n", type);
985 		return NULL;
986 	}
987 
988 	if (id >= HID_MAX_IDS) {
989 		hid_err(hid, "invalid HID report id %u\n", id);
990 		return NULL;
991 	}
992 
993 	/*
994 	 * Explicitly not using hid_get_report() here since it depends on
995 	 * ->numbered being checked, which may not always be the case when
996 	 * drivers go to access report values.
997 	 */
998 	if (id == 0) {
999 		/*
1000 		 * Validating on id 0 means we should examine the first
1001 		 * report in the list.
1002 		 */
1003 		report = list_first_entry_or_null(
1004 				&hid->report_enum[type].report_list,
1005 				struct hid_report, list);
1006 	} else {
1007 		report = hid->report_enum[type].report_id_hash[id];
1008 	}
1009 	if (!report) {
1010 		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1011 		return NULL;
1012 	}
1013 	if (report->maxfield <= field_index) {
1014 		hid_err(hid, "not enough fields in %s %u\n",
1015 			hid_report_names[type], id);
1016 		return NULL;
1017 	}
1018 	if (report->field[field_index]->report_count < report_counts) {
1019 		hid_err(hid, "not enough values in %s %u field %u\n",
1020 			hid_report_names[type], id, field_index);
1021 		return NULL;
1022 	}
1023 	return report;
1024 }
1025 EXPORT_SYMBOL_GPL(hid_validate_values);
1026 
hid_calculate_multiplier(struct hid_device * hid,struct hid_field * multiplier)1027 static int hid_calculate_multiplier(struct hid_device *hid,
1028 				     struct hid_field *multiplier)
1029 {
1030 	int m;
1031 	__s32 v = *multiplier->value;
1032 	__s32 lmin = multiplier->logical_minimum;
1033 	__s32 lmax = multiplier->logical_maximum;
1034 	__s32 pmin = multiplier->physical_minimum;
1035 	__s32 pmax = multiplier->physical_maximum;
1036 
1037 	/*
1038 	 * "Because OS implementations will generally divide the control's
1039 	 * reported count by the Effective Resolution Multiplier, designers
1040 	 * should take care not to establish a potential Effective
1041 	 * Resolution Multiplier of zero."
1042 	 * HID Usage Table, v1.12, Section 4.3.1, p31
1043 	 */
1044 	if (lmax - lmin == 0)
1045 		return 1;
1046 	/*
1047 	 * Handling the unit exponent is left as an exercise to whoever
1048 	 * finds a device where that exponent is not 0.
1049 	 */
1050 	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1051 	if (unlikely(multiplier->unit_exponent != 0)) {
1052 		hid_warn(hid,
1053 			 "unsupported Resolution Multiplier unit exponent %d\n",
1054 			 multiplier->unit_exponent);
1055 	}
1056 
1057 	/* There are no devices with an effective multiplier > 255 */
1058 	if (unlikely(m == 0 || m > 255 || m < -255)) {
1059 		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1060 		m = 1;
1061 	}
1062 
1063 	return m;
1064 }
1065 
hid_apply_multiplier_to_field(struct hid_device * hid,struct hid_field * field,struct hid_collection * multiplier_collection,int effective_multiplier)1066 static void hid_apply_multiplier_to_field(struct hid_device *hid,
1067 					  struct hid_field *field,
1068 					  struct hid_collection *multiplier_collection,
1069 					  int effective_multiplier)
1070 {
1071 	struct hid_collection *collection;
1072 	struct hid_usage *usage;
1073 	int i;
1074 
1075 	/*
1076 	 * If multiplier_collection is NULL, the multiplier applies
1077 	 * to all fields in the report.
1078 	 * Otherwise, it is the Logical Collection the multiplier applies to
1079 	 * but our field may be in a subcollection of that collection.
1080 	 */
1081 	for (i = 0; i < field->maxusage; i++) {
1082 		usage = &field->usage[i];
1083 
1084 		collection = &hid->collection[usage->collection_index];
1085 		while (collection->parent_idx != -1 &&
1086 		       collection != multiplier_collection)
1087 			collection = &hid->collection[collection->parent_idx];
1088 
1089 		if (collection->parent_idx != -1 ||
1090 		    multiplier_collection == NULL)
1091 			usage->resolution_multiplier = effective_multiplier;
1092 
1093 	}
1094 }
1095 
hid_apply_multiplier(struct hid_device * hid,struct hid_field * multiplier)1096 static void hid_apply_multiplier(struct hid_device *hid,
1097 				 struct hid_field *multiplier)
1098 {
1099 	struct hid_report_enum *rep_enum;
1100 	struct hid_report *rep;
1101 	struct hid_field *field;
1102 	struct hid_collection *multiplier_collection;
1103 	int effective_multiplier;
1104 	int i;
1105 
1106 	/*
1107 	 * "The Resolution Multiplier control must be contained in the same
1108 	 * Logical Collection as the control(s) to which it is to be applied.
1109 	 * If no Resolution Multiplier is defined, then the Resolution
1110 	 * Multiplier defaults to 1.  If more than one control exists in a
1111 	 * Logical Collection, the Resolution Multiplier is associated with
1112 	 * all controls in the collection. If no Logical Collection is
1113 	 * defined, the Resolution Multiplier is associated with all
1114 	 * controls in the report."
1115 	 * HID Usage Table, v1.12, Section 4.3.1, p30
1116 	 *
1117 	 * Thus, search from the current collection upwards until we find a
1118 	 * logical collection. Then search all fields for that same parent
1119 	 * collection. Those are the fields the multiplier applies to.
1120 	 *
1121 	 * If we have more than one multiplier, it will overwrite the
1122 	 * applicable fields later.
1123 	 */
1124 	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1125 	while (multiplier_collection->parent_idx != -1 &&
1126 	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
1127 		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1128 
1129 	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1130 
1131 	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1132 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1133 		for (i = 0; i < rep->maxfield; i++) {
1134 			field = rep->field[i];
1135 			hid_apply_multiplier_to_field(hid, field,
1136 						      multiplier_collection,
1137 						      effective_multiplier);
1138 		}
1139 	}
1140 }
1141 
1142 /*
1143  * hid_setup_resolution_multiplier - set up all resolution multipliers
1144  *
1145  * @device: hid device
1146  *
1147  * Search for all Resolution Multiplier Feature Reports and apply their
1148  * value to all matching Input items. This only updates the internal struct
1149  * fields.
1150  *
1151  * The Resolution Multiplier is applied by the hardware. If the multiplier
1152  * is anything other than 1, the hardware will send pre-multiplied events
1153  * so that the same physical interaction generates an accumulated
1154  *	accumulated_value = value * * multiplier
1155  * This may be achieved by sending
1156  * - "value * multiplier" for each event, or
1157  * - "value" but "multiplier" times as frequently, or
1158  * - a combination of the above
1159  * The only guarantee is that the same physical interaction always generates
1160  * an accumulated 'value * multiplier'.
1161  *
1162  * This function must be called before any event processing and after
1163  * any SetRequest to the Resolution Multiplier.
1164  */
hid_setup_resolution_multiplier(struct hid_device * hid)1165 void hid_setup_resolution_multiplier(struct hid_device *hid)
1166 {
1167 	struct hid_report_enum *rep_enum;
1168 	struct hid_report *rep;
1169 	struct hid_usage *usage;
1170 	int i, j;
1171 
1172 	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1173 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1174 		for (i = 0; i < rep->maxfield; i++) {
1175 			/* Ignore if report count is out of bounds. */
1176 			if (rep->field[i]->report_count < 1)
1177 				continue;
1178 
1179 			for (j = 0; j < rep->field[i]->maxusage; j++) {
1180 				usage = &rep->field[i]->usage[j];
1181 				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1182 					hid_apply_multiplier(hid,
1183 							     rep->field[i]);
1184 			}
1185 		}
1186 	}
1187 }
1188 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1189 
1190 /**
1191  * hid_open_report - open a driver-specific device report
1192  *
1193  * @device: hid device
1194  *
1195  * Parse a report description into a hid_device structure. Reports are
1196  * enumerated, fields are attached to these reports.
1197  * 0 returned on success, otherwise nonzero error value.
1198  *
1199  * This function (or the equivalent hid_parse() macro) should only be
1200  * called from probe() in drivers, before starting the device.
1201  */
hid_open_report(struct hid_device * device)1202 int hid_open_report(struct hid_device *device)
1203 {
1204 	struct hid_parser *parser;
1205 	struct hid_item item;
1206 	unsigned int size;
1207 	const __u8 *start;
1208 	__u8 *buf;
1209 	const __u8 *end;
1210 	const __u8 *next;
1211 	int ret;
1212 	int i;
1213 	static int (*dispatch_type[])(struct hid_parser *parser,
1214 				      struct hid_item *item) = {
1215 		hid_parser_main,
1216 		hid_parser_global,
1217 		hid_parser_local,
1218 		hid_parser_reserved
1219 	};
1220 
1221 	if (WARN_ON(device->status & HID_STAT_PARSED))
1222 		return -EBUSY;
1223 
1224 	start = device->dev_rdesc;
1225 	if (WARN_ON(!start))
1226 		return -ENODEV;
1227 	size = device->dev_rsize;
1228 
1229 	/* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */
1230 	buf = call_hid_bpf_rdesc_fixup(device, start, &size);
1231 	if (buf == NULL)
1232 		return -ENOMEM;
1233 
1234 	if (device->driver->report_fixup)
1235 		start = device->driver->report_fixup(device, buf, &size);
1236 	else
1237 		start = buf;
1238 
1239 	start = kmemdup(start, size, GFP_KERNEL);
1240 	kfree(buf);
1241 	if (start == NULL)
1242 		return -ENOMEM;
1243 
1244 	device->rdesc = start;
1245 	device->rsize = size;
1246 
1247 	parser = vzalloc(sizeof(struct hid_parser));
1248 	if (!parser) {
1249 		ret = -ENOMEM;
1250 		goto alloc_err;
1251 	}
1252 
1253 	parser->device = device;
1254 
1255 	end = start + size;
1256 
1257 	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1258 				     sizeof(struct hid_collection), GFP_KERNEL);
1259 	if (!device->collection) {
1260 		ret = -ENOMEM;
1261 		goto err;
1262 	}
1263 	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1264 	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1265 		device->collection[i].parent_idx = -1;
1266 
1267 	ret = -EINVAL;
1268 	while ((next = fetch_item(start, end, &item)) != NULL) {
1269 		start = next;
1270 
1271 		if (item.format != HID_ITEM_FORMAT_SHORT) {
1272 			hid_err(device, "unexpected long global item\n");
1273 			goto err;
1274 		}
1275 
1276 		if (dispatch_type[item.type](parser, &item)) {
1277 			hid_err(device, "item %u %u %u %u parsing failed\n",
1278 				item.format, (unsigned)item.size,
1279 				(unsigned)item.type, (unsigned)item.tag);
1280 			goto err;
1281 		}
1282 
1283 		if (start == end) {
1284 			if (parser->collection_stack_ptr) {
1285 				hid_err(device, "unbalanced collection at end of report description\n");
1286 				goto err;
1287 			}
1288 			if (parser->local.delimiter_depth) {
1289 				hid_err(device, "unbalanced delimiter at end of report description\n");
1290 				goto err;
1291 			}
1292 
1293 			/*
1294 			 * fetch initial values in case the device's
1295 			 * default multiplier isn't the recommended 1
1296 			 */
1297 			hid_setup_resolution_multiplier(device);
1298 
1299 			kfree(parser->collection_stack);
1300 			vfree(parser);
1301 			device->status |= HID_STAT_PARSED;
1302 
1303 			return 0;
1304 		}
1305 	}
1306 
1307 	hid_err(device, "item fetching failed at offset %u/%u\n",
1308 		size - (unsigned int)(end - start), size);
1309 err:
1310 	kfree(parser->collection_stack);
1311 alloc_err:
1312 	vfree(parser);
1313 	hid_close_report(device);
1314 	return ret;
1315 }
1316 EXPORT_SYMBOL_GPL(hid_open_report);
1317 
1318 /*
1319  * Convert a signed n-bit integer to signed 32-bit integer. Common
1320  * cases are done through the compiler, the screwed things has to be
1321  * done by hand.
1322  */
1323 
snto32(__u32 value,unsigned n)1324 static s32 snto32(__u32 value, unsigned n)
1325 {
1326 	if (!value || !n)
1327 		return 0;
1328 
1329 	if (n > 32)
1330 		n = 32;
1331 
1332 	switch (n) {
1333 	case 8:  return ((__s8)value);
1334 	case 16: return ((__s16)value);
1335 	case 32: return ((__s32)value);
1336 	}
1337 	return value & (1 << (n - 1)) ? value | (~0U << n) : value;
1338 }
1339 
hid_snto32(__u32 value,unsigned n)1340 s32 hid_snto32(__u32 value, unsigned n)
1341 {
1342 	return snto32(value, n);
1343 }
1344 EXPORT_SYMBOL_GPL(hid_snto32);
1345 
1346 /*
1347  * Convert a signed 32-bit integer to a signed n-bit integer.
1348  */
1349 
s32ton(__s32 value,unsigned n)1350 static u32 s32ton(__s32 value, unsigned n)
1351 {
1352 	s32 a = value >> (n - 1);
1353 	if (a && a != -1)
1354 		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
1355 	return value & ((1 << n) - 1);
1356 }
1357 
1358 /*
1359  * Extract/implement a data field from/to a little endian report (bit array).
1360  *
1361  * Code sort-of follows HID spec:
1362  *     http://www.usb.org/developers/hidpage/HID1_11.pdf
1363  *
1364  * While the USB HID spec allows unlimited length bit fields in "report
1365  * descriptors", most devices never use more than 16 bits.
1366  * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1367  * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1368  */
1369 
__extract(u8 * report,unsigned offset,int n)1370 static u32 __extract(u8 *report, unsigned offset, int n)
1371 {
1372 	unsigned int idx = offset / 8;
1373 	unsigned int bit_nr = 0;
1374 	unsigned int bit_shift = offset % 8;
1375 	int bits_to_copy = 8 - bit_shift;
1376 	u32 value = 0;
1377 	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1378 
1379 	while (n > 0) {
1380 		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1381 		n -= bits_to_copy;
1382 		bit_nr += bits_to_copy;
1383 		bits_to_copy = 8;
1384 		bit_shift = 0;
1385 		idx++;
1386 	}
1387 
1388 	return value & mask;
1389 }
1390 
hid_field_extract(const struct hid_device * hid,u8 * report,unsigned offset,unsigned n)1391 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1392 			unsigned offset, unsigned n)
1393 {
1394 	if (n > 32) {
1395 		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1396 			      __func__, n, current->comm);
1397 		n = 32;
1398 	}
1399 
1400 	return __extract(report, offset, n);
1401 }
1402 EXPORT_SYMBOL_GPL(hid_field_extract);
1403 
1404 /*
1405  * "implement" : set bits in a little endian bit stream.
1406  * Same concepts as "extract" (see comments above).
1407  * The data mangled in the bit stream remains in little endian
1408  * order the whole time. It make more sense to talk about
1409  * endianness of register values by considering a register
1410  * a "cached" copy of the little endian bit stream.
1411  */
1412 
__implement(u8 * report,unsigned offset,int n,u32 value)1413 static void __implement(u8 *report, unsigned offset, int n, u32 value)
1414 {
1415 	unsigned int idx = offset / 8;
1416 	unsigned int bit_shift = offset % 8;
1417 	int bits_to_set = 8 - bit_shift;
1418 
1419 	while (n - bits_to_set >= 0) {
1420 		report[idx] &= ~(0xff << bit_shift);
1421 		report[idx] |= value << bit_shift;
1422 		value >>= bits_to_set;
1423 		n -= bits_to_set;
1424 		bits_to_set = 8;
1425 		bit_shift = 0;
1426 		idx++;
1427 	}
1428 
1429 	/* last nibble */
1430 	if (n) {
1431 		u8 bit_mask = ((1U << n) - 1);
1432 		report[idx] &= ~(bit_mask << bit_shift);
1433 		report[idx] |= value << bit_shift;
1434 	}
1435 }
1436 
implement(const struct hid_device * hid,u8 * report,unsigned offset,unsigned n,u32 value)1437 static void implement(const struct hid_device *hid, u8 *report,
1438 		      unsigned offset, unsigned n, u32 value)
1439 {
1440 	if (unlikely(n > 32)) {
1441 		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1442 			 __func__, n, current->comm);
1443 		n = 32;
1444 	} else if (n < 32) {
1445 		u32 m = (1U << n) - 1;
1446 
1447 		if (unlikely(value > m)) {
1448 			hid_warn(hid,
1449 				 "%s() called with too large value %d (n: %d)! (%s)\n",
1450 				 __func__, value, n, current->comm);
1451 			value &= m;
1452 		}
1453 	}
1454 
1455 	__implement(report, offset, n, value);
1456 }
1457 
1458 /*
1459  * Search an array for a value.
1460  */
1461 
search(__s32 * array,__s32 value,unsigned n)1462 static int search(__s32 *array, __s32 value, unsigned n)
1463 {
1464 	while (n--) {
1465 		if (*array++ == value)
1466 			return 0;
1467 	}
1468 	return -1;
1469 }
1470 
1471 /**
1472  * hid_match_report - check if driver's raw_event should be called
1473  *
1474  * @hid: hid device
1475  * @report: hid report to match against
1476  *
1477  * compare hid->driver->report_table->report_type to report->type
1478  */
hid_match_report(struct hid_device * hid,struct hid_report * report)1479 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1480 {
1481 	const struct hid_report_id *id = hid->driver->report_table;
1482 
1483 	if (!id) /* NULL means all */
1484 		return 1;
1485 
1486 	for (; id->report_type != HID_TERMINATOR; id++)
1487 		if (id->report_type == HID_ANY_ID ||
1488 				id->report_type == report->type)
1489 			return 1;
1490 	return 0;
1491 }
1492 
1493 /**
1494  * hid_match_usage - check if driver's event should be called
1495  *
1496  * @hid: hid device
1497  * @usage: usage to match against
1498  *
1499  * compare hid->driver->usage_table->usage_{type,code} to
1500  * usage->usage_{type,code}
1501  */
hid_match_usage(struct hid_device * hid,struct hid_usage * usage)1502 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1503 {
1504 	const struct hid_usage_id *id = hid->driver->usage_table;
1505 
1506 	if (!id) /* NULL means all */
1507 		return 1;
1508 
1509 	for (; id->usage_type != HID_ANY_ID - 1; id++)
1510 		if ((id->usage_hid == HID_ANY_ID ||
1511 				id->usage_hid == usage->hid) &&
1512 				(id->usage_type == HID_ANY_ID ||
1513 				id->usage_type == usage->type) &&
1514 				(id->usage_code == HID_ANY_ID ||
1515 				 id->usage_code == usage->code))
1516 			return 1;
1517 	return 0;
1518 }
1519 
hid_process_event(struct hid_device * hid,struct hid_field * field,struct hid_usage * usage,__s32 value,int interrupt)1520 static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1521 		struct hid_usage *usage, __s32 value, int interrupt)
1522 {
1523 	struct hid_driver *hdrv = hid->driver;
1524 	int ret;
1525 
1526 	if (!list_empty(&hid->debug_list))
1527 		hid_dump_input(hid, usage, value);
1528 
1529 	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1530 		ret = hdrv->event(hid, field, usage, value);
1531 		if (ret != 0) {
1532 			if (ret < 0)
1533 				hid_err(hid, "%s's event failed with %d\n",
1534 						hdrv->name, ret);
1535 			return;
1536 		}
1537 	}
1538 
1539 	if (hid->claimed & HID_CLAIMED_INPUT)
1540 		hidinput_hid_event(hid, field, usage, value);
1541 	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1542 		hid->hiddev_hid_event(hid, field, usage, value);
1543 }
1544 
1545 /*
1546  * Checks if the given value is valid within this field
1547  */
hid_array_value_is_valid(struct hid_field * field,__s32 value)1548 static inline int hid_array_value_is_valid(struct hid_field *field,
1549 					   __s32 value)
1550 {
1551 	__s32 min = field->logical_minimum;
1552 
1553 	/*
1554 	 * Value needs to be between logical min and max, and
1555 	 * (value - min) is used as an index in the usage array.
1556 	 * This array is of size field->maxusage
1557 	 */
1558 	return value >= min &&
1559 	       value <= field->logical_maximum &&
1560 	       value - min < field->maxusage;
1561 }
1562 
1563 /*
1564  * Fetch the field from the data. The field content is stored for next
1565  * report processing (we do differential reporting to the layer).
1566  */
hid_input_fetch_field(struct hid_device * hid,struct hid_field * field,__u8 * data)1567 static void hid_input_fetch_field(struct hid_device *hid,
1568 				  struct hid_field *field,
1569 				  __u8 *data)
1570 {
1571 	unsigned n;
1572 	unsigned count = field->report_count;
1573 	unsigned offset = field->report_offset;
1574 	unsigned size = field->report_size;
1575 	__s32 min = field->logical_minimum;
1576 	__s32 *value;
1577 
1578 	value = field->new_value;
1579 	memset(value, 0, count * sizeof(__s32));
1580 	field->ignored = false;
1581 
1582 	for (n = 0; n < count; n++) {
1583 
1584 		value[n] = min < 0 ?
1585 			snto32(hid_field_extract(hid, data, offset + n * size,
1586 			       size), size) :
1587 			hid_field_extract(hid, data, offset + n * size, size);
1588 
1589 		/* Ignore report if ErrorRollOver */
1590 		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1591 		    hid_array_value_is_valid(field, value[n]) &&
1592 		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1593 			field->ignored = true;
1594 			return;
1595 		}
1596 	}
1597 }
1598 
1599 /*
1600  * Process a received variable field.
1601  */
1602 
hid_input_var_field(struct hid_device * hid,struct hid_field * field,int interrupt)1603 static void hid_input_var_field(struct hid_device *hid,
1604 				struct hid_field *field,
1605 				int interrupt)
1606 {
1607 	unsigned int count = field->report_count;
1608 	__s32 *value = field->new_value;
1609 	unsigned int n;
1610 
1611 	for (n = 0; n < count; n++)
1612 		hid_process_event(hid,
1613 				  field,
1614 				  &field->usage[n],
1615 				  value[n],
1616 				  interrupt);
1617 
1618 	memcpy(field->value, value, count * sizeof(__s32));
1619 }
1620 
1621 /*
1622  * Process a received array field. The field content is stored for
1623  * next report processing (we do differential reporting to the layer).
1624  */
1625 
hid_input_array_field(struct hid_device * hid,struct hid_field * field,int interrupt)1626 static void hid_input_array_field(struct hid_device *hid,
1627 				  struct hid_field *field,
1628 				  int interrupt)
1629 {
1630 	unsigned int n;
1631 	unsigned int count = field->report_count;
1632 	__s32 min = field->logical_minimum;
1633 	__s32 *value;
1634 
1635 	value = field->new_value;
1636 
1637 	/* ErrorRollOver */
1638 	if (field->ignored)
1639 		return;
1640 
1641 	for (n = 0; n < count; n++) {
1642 		if (hid_array_value_is_valid(field, field->value[n]) &&
1643 		    search(value, field->value[n], count))
1644 			hid_process_event(hid,
1645 					  field,
1646 					  &field->usage[field->value[n] - min],
1647 					  0,
1648 					  interrupt);
1649 
1650 		if (hid_array_value_is_valid(field, value[n]) &&
1651 		    search(field->value, value[n], count))
1652 			hid_process_event(hid,
1653 					  field,
1654 					  &field->usage[value[n] - min],
1655 					  1,
1656 					  interrupt);
1657 	}
1658 
1659 	memcpy(field->value, value, count * sizeof(__s32));
1660 }
1661 
1662 /*
1663  * Analyse a received report, and fetch the data from it. The field
1664  * content is stored for next report processing (we do differential
1665  * reporting to the layer).
1666  */
hid_process_report(struct hid_device * hid,struct hid_report * report,__u8 * data,int interrupt)1667 static void hid_process_report(struct hid_device *hid,
1668 			       struct hid_report *report,
1669 			       __u8 *data,
1670 			       int interrupt)
1671 {
1672 	unsigned int a;
1673 	struct hid_field_entry *entry;
1674 	struct hid_field *field;
1675 
1676 	/* first retrieve all incoming values in data */
1677 	for (a = 0; a < report->maxfield; a++)
1678 		hid_input_fetch_field(hid, report->field[a], data);
1679 
1680 	if (!list_empty(&report->field_entry_list)) {
1681 		/* INPUT_REPORT, we have a priority list of fields */
1682 		list_for_each_entry(entry,
1683 				    &report->field_entry_list,
1684 				    list) {
1685 			field = entry->field;
1686 
1687 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1688 				hid_process_event(hid,
1689 						  field,
1690 						  &field->usage[entry->index],
1691 						  field->new_value[entry->index],
1692 						  interrupt);
1693 			else
1694 				hid_input_array_field(hid, field, interrupt);
1695 		}
1696 
1697 		/* we need to do the memcpy at the end for var items */
1698 		for (a = 0; a < report->maxfield; a++) {
1699 			field = report->field[a];
1700 
1701 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1702 				memcpy(field->value, field->new_value,
1703 				       field->report_count * sizeof(__s32));
1704 		}
1705 	} else {
1706 		/* FEATURE_REPORT, regular processing */
1707 		for (a = 0; a < report->maxfield; a++) {
1708 			field = report->field[a];
1709 
1710 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1711 				hid_input_var_field(hid, field, interrupt);
1712 			else
1713 				hid_input_array_field(hid, field, interrupt);
1714 		}
1715 	}
1716 }
1717 
1718 /*
1719  * Insert a given usage_index in a field in the list
1720  * of processed usages in the report.
1721  *
1722  * The elements of lower priority score are processed
1723  * first.
1724  */
__hid_insert_field_entry(struct hid_device * hid,struct hid_report * report,struct hid_field_entry * entry,struct hid_field * field,unsigned int usage_index)1725 static void __hid_insert_field_entry(struct hid_device *hid,
1726 				     struct hid_report *report,
1727 				     struct hid_field_entry *entry,
1728 				     struct hid_field *field,
1729 				     unsigned int usage_index)
1730 {
1731 	struct hid_field_entry *next;
1732 
1733 	entry->field = field;
1734 	entry->index = usage_index;
1735 	entry->priority = field->usages_priorities[usage_index];
1736 
1737 	/* insert the element at the correct position */
1738 	list_for_each_entry(next,
1739 			    &report->field_entry_list,
1740 			    list) {
1741 		/*
1742 		 * the priority of our element is strictly higher
1743 		 * than the next one, insert it before
1744 		 */
1745 		if (entry->priority > next->priority) {
1746 			list_add_tail(&entry->list, &next->list);
1747 			return;
1748 		}
1749 	}
1750 
1751 	/* lowest priority score: insert at the end */
1752 	list_add_tail(&entry->list, &report->field_entry_list);
1753 }
1754 
hid_report_process_ordering(struct hid_device * hid,struct hid_report * report)1755 static void hid_report_process_ordering(struct hid_device *hid,
1756 					struct hid_report *report)
1757 {
1758 	struct hid_field *field;
1759 	struct hid_field_entry *entries;
1760 	unsigned int a, u, usages;
1761 	unsigned int count = 0;
1762 
1763 	/* count the number of individual fields in the report */
1764 	for (a = 0; a < report->maxfield; a++) {
1765 		field = report->field[a];
1766 
1767 		if (field->flags & HID_MAIN_ITEM_VARIABLE)
1768 			count += field->report_count;
1769 		else
1770 			count++;
1771 	}
1772 
1773 	/* allocate the memory to process the fields */
1774 	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1775 	if (!entries)
1776 		return;
1777 
1778 	report->field_entries = entries;
1779 
1780 	/*
1781 	 * walk through all fields in the report and
1782 	 * store them by priority order in report->field_entry_list
1783 	 *
1784 	 * - Var elements are individualized (field + usage_index)
1785 	 * - Arrays are taken as one, we can not chose an order for them
1786 	 */
1787 	usages = 0;
1788 	for (a = 0; a < report->maxfield; a++) {
1789 		field = report->field[a];
1790 
1791 		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1792 			for (u = 0; u < field->report_count; u++) {
1793 				__hid_insert_field_entry(hid, report,
1794 							 &entries[usages],
1795 							 field, u);
1796 				usages++;
1797 			}
1798 		} else {
1799 			__hid_insert_field_entry(hid, report, &entries[usages],
1800 						 field, 0);
1801 			usages++;
1802 		}
1803 	}
1804 }
1805 
hid_process_ordering(struct hid_device * hid)1806 static void hid_process_ordering(struct hid_device *hid)
1807 {
1808 	struct hid_report *report;
1809 	struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1810 
1811 	list_for_each_entry(report, &report_enum->report_list, list)
1812 		hid_report_process_ordering(hid, report);
1813 }
1814 
1815 /*
1816  * Output the field into the report.
1817  */
1818 
hid_output_field(const struct hid_device * hid,struct hid_field * field,__u8 * data)1819 static void hid_output_field(const struct hid_device *hid,
1820 			     struct hid_field *field, __u8 *data)
1821 {
1822 	unsigned count = field->report_count;
1823 	unsigned offset = field->report_offset;
1824 	unsigned size = field->report_size;
1825 	unsigned n;
1826 
1827 	for (n = 0; n < count; n++) {
1828 		if (field->logical_minimum < 0)	/* signed values */
1829 			implement(hid, data, offset + n * size, size,
1830 				  s32ton(field->value[n], size));
1831 		else				/* unsigned values */
1832 			implement(hid, data, offset + n * size, size,
1833 				  field->value[n]);
1834 	}
1835 }
1836 
1837 /*
1838  * Compute the size of a report.
1839  */
hid_compute_report_size(struct hid_report * report)1840 static size_t hid_compute_report_size(struct hid_report *report)
1841 {
1842 	if (report->size)
1843 		return ((report->size - 1) >> 3) + 1;
1844 
1845 	return 0;
1846 }
1847 
1848 /*
1849  * Create a report. 'data' has to be allocated using
1850  * hid_alloc_report_buf() so that it has proper size.
1851  */
1852 
hid_output_report(struct hid_report * report,__u8 * data)1853 void hid_output_report(struct hid_report *report, __u8 *data)
1854 {
1855 	unsigned n;
1856 
1857 	if (report->id > 0)
1858 		*data++ = report->id;
1859 
1860 	memset(data, 0, hid_compute_report_size(report));
1861 	for (n = 0; n < report->maxfield; n++)
1862 		hid_output_field(report->device, report->field[n], data);
1863 }
1864 EXPORT_SYMBOL_GPL(hid_output_report);
1865 
1866 /*
1867  * Allocator for buffer that is going to be passed to hid_output_report()
1868  */
hid_alloc_report_buf(struct hid_report * report,gfp_t flags)1869 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1870 {
1871 	/*
1872 	 * 7 extra bytes are necessary to achieve proper functionality
1873 	 * of implement() working on 8 byte chunks
1874 	 */
1875 
1876 	u32 len = hid_report_len(report) + 7;
1877 
1878 	return kzalloc(len, flags);
1879 }
1880 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
1881 
1882 /*
1883  * Set a field value. The report this field belongs to has to be
1884  * created and transferred to the device, to set this value in the
1885  * device.
1886  */
1887 
hid_set_field(struct hid_field * field,unsigned offset,__s32 value)1888 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1889 {
1890 	unsigned size;
1891 
1892 	if (!field)
1893 		return -1;
1894 
1895 	size = field->report_size;
1896 
1897 	hid_dump_input(field->report->device, field->usage + offset, value);
1898 
1899 	if (offset >= field->report_count) {
1900 		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1901 				offset, field->report_count);
1902 		return -1;
1903 	}
1904 	if (field->logical_minimum < 0) {
1905 		if (value != snto32(s32ton(value, size), size)) {
1906 			hid_err(field->report->device, "value %d is out of range\n", value);
1907 			return -1;
1908 		}
1909 	}
1910 	field->value[offset] = value;
1911 	return 0;
1912 }
1913 EXPORT_SYMBOL_GPL(hid_set_field);
1914 
hid_find_field(struct hid_device * hdev,unsigned int report_type,unsigned int application,unsigned int usage)1915 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1916 				 unsigned int application, unsigned int usage)
1917 {
1918 	struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1919 	struct hid_report *report;
1920 	int i, j;
1921 
1922 	list_for_each_entry(report, report_list, list) {
1923 		if (report->application != application)
1924 			continue;
1925 
1926 		for (i = 0; i < report->maxfield; i++) {
1927 			struct hid_field *field = report->field[i];
1928 
1929 			for (j = 0; j < field->maxusage; j++) {
1930 				if (field->usage[j].hid == usage)
1931 					return field;
1932 			}
1933 		}
1934 	}
1935 
1936 	return NULL;
1937 }
1938 EXPORT_SYMBOL_GPL(hid_find_field);
1939 
hid_get_report(struct hid_report_enum * report_enum,const u8 * data)1940 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1941 		const u8 *data)
1942 {
1943 	struct hid_report *report;
1944 	unsigned int n = 0;	/* Normally report number is 0 */
1945 
1946 	/* Device uses numbered reports, data[0] is report number */
1947 	if (report_enum->numbered)
1948 		n = *data;
1949 
1950 	report = report_enum->report_id_hash[n];
1951 	if (report == NULL)
1952 		dbg_hid("undefined report_id %u received\n", n);
1953 
1954 	return report;
1955 }
1956 
1957 /*
1958  * Implement a generic .request() callback, using .raw_request()
1959  * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1960  */
__hid_request(struct hid_device * hid,struct hid_report * report,enum hid_class_request reqtype)1961 int __hid_request(struct hid_device *hid, struct hid_report *report,
1962 		enum hid_class_request reqtype)
1963 {
1964 	char *buf;
1965 	int ret;
1966 	u32 len;
1967 
1968 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
1969 	if (!buf)
1970 		return -ENOMEM;
1971 
1972 	len = hid_report_len(report);
1973 
1974 	if (reqtype == HID_REQ_SET_REPORT)
1975 		hid_output_report(report, buf);
1976 
1977 	ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
1978 					  report->type, reqtype);
1979 	if (ret < 0) {
1980 		dbg_hid("unable to complete request: %d\n", ret);
1981 		goto out;
1982 	}
1983 
1984 	if (reqtype == HID_REQ_GET_REPORT)
1985 		hid_input_report(hid, report->type, buf, ret, 0);
1986 
1987 	ret = 0;
1988 
1989 out:
1990 	kfree(buf);
1991 	return ret;
1992 }
1993 EXPORT_SYMBOL_GPL(__hid_request);
1994 
hid_report_raw_event(struct hid_device * hid,enum hid_report_type type,u8 * data,u32 size,int interrupt)1995 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
1996 			 int interrupt)
1997 {
1998 	struct hid_report_enum *report_enum = hid->report_enum + type;
1999 	struct hid_report *report;
2000 	struct hid_driver *hdrv;
2001 	int max_buffer_size = HID_MAX_BUFFER_SIZE;
2002 	u32 rsize, csize = size;
2003 	u8 *cdata = data;
2004 	int ret = 0;
2005 
2006 	report = hid_get_report(report_enum, data);
2007 	if (!report)
2008 		goto out;
2009 
2010 	if (report_enum->numbered) {
2011 		cdata++;
2012 		csize--;
2013 	}
2014 
2015 	rsize = hid_compute_report_size(report);
2016 
2017 	if (hid->ll_driver->max_buffer_size)
2018 		max_buffer_size = hid->ll_driver->max_buffer_size;
2019 
2020 	if (report_enum->numbered && rsize >= max_buffer_size)
2021 		rsize = max_buffer_size - 1;
2022 	else if (rsize > max_buffer_size)
2023 		rsize = max_buffer_size;
2024 
2025 	if (csize < rsize) {
2026 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2027 				csize, rsize);
2028 		memset(cdata + csize, 0, rsize - csize);
2029 	}
2030 
2031 	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2032 		hid->hiddev_report_event(hid, report);
2033 	if (hid->claimed & HID_CLAIMED_HIDRAW) {
2034 		ret = hidraw_report_event(hid, data, size);
2035 		if (ret)
2036 			goto out;
2037 	}
2038 
2039 	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2040 		hid_process_report(hid, report, cdata, interrupt);
2041 		hdrv = hid->driver;
2042 		if (hdrv && hdrv->report)
2043 			hdrv->report(hid, report);
2044 	}
2045 
2046 	if (hid->claimed & HID_CLAIMED_INPUT)
2047 		hidinput_report_event(hid, report);
2048 out:
2049 	return ret;
2050 }
2051 EXPORT_SYMBOL_GPL(hid_report_raw_event);
2052 
2053 
__hid_input_report(struct hid_device * hid,enum hid_report_type type,u8 * data,u32 size,int interrupt,u64 source,bool from_bpf,bool lock_already_taken)2054 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2055 			      u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2056 			      bool lock_already_taken)
2057 {
2058 	struct hid_report_enum *report_enum;
2059 	struct hid_driver *hdrv;
2060 	struct hid_report *report;
2061 	int ret = 0;
2062 
2063 	if (!hid)
2064 		return -ENODEV;
2065 
2066 	ret = down_trylock(&hid->driver_input_lock);
2067 	if (lock_already_taken && !ret) {
2068 		up(&hid->driver_input_lock);
2069 		return -EINVAL;
2070 	} else if (!lock_already_taken && ret) {
2071 		return -EBUSY;
2072 	}
2073 
2074 	if (!hid->driver) {
2075 		ret = -ENODEV;
2076 		goto unlock;
2077 	}
2078 	report_enum = hid->report_enum + type;
2079 	hdrv = hid->driver;
2080 
2081 	data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2082 	if (IS_ERR(data)) {
2083 		ret = PTR_ERR(data);
2084 		goto unlock;
2085 	}
2086 
2087 	if (!size) {
2088 		dbg_hid("empty report\n");
2089 		ret = -1;
2090 		goto unlock;
2091 	}
2092 
2093 	/* Avoid unnecessary overhead if debugfs is disabled */
2094 	if (!list_empty(&hid->debug_list))
2095 		hid_dump_report(hid, type, data, size);
2096 
2097 	report = hid_get_report(report_enum, data);
2098 
2099 	if (!report) {
2100 		ret = -1;
2101 		goto unlock;
2102 	}
2103 
2104 	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2105 		ret = hdrv->raw_event(hid, report, data, size);
2106 		if (ret < 0)
2107 			goto unlock;
2108 	}
2109 
2110 	ret = hid_report_raw_event(hid, type, data, size, interrupt);
2111 
2112 unlock:
2113 	if (!lock_already_taken)
2114 		up(&hid->driver_input_lock);
2115 	return ret;
2116 }
2117 
2118 /**
2119  * hid_input_report - report data from lower layer (usb, bt...)
2120  *
2121  * @hid: hid device
2122  * @type: HID report type (HID_*_REPORT)
2123  * @data: report contents
2124  * @size: size of data parameter
2125  * @interrupt: distinguish between interrupt and control transfers
2126  *
2127  * This is data entry for lower layers.
2128  */
hid_input_report(struct hid_device * hid,enum hid_report_type type,u8 * data,u32 size,int interrupt)2129 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2130 		     int interrupt)
2131 {
2132 	return __hid_input_report(hid, type, data, size, interrupt, 0,
2133 				  false, /* from_bpf */
2134 				  false /* lock_already_taken */);
2135 }
2136 EXPORT_SYMBOL_GPL(hid_input_report);
2137 
hid_match_one_id(const struct hid_device * hdev,const struct hid_device_id * id)2138 bool hid_match_one_id(const struct hid_device *hdev,
2139 		      const struct hid_device_id *id)
2140 {
2141 	return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2142 		(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2143 		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2144 		(id->product == HID_ANY_ID || id->product == hdev->product);
2145 }
2146 
hid_match_id(const struct hid_device * hdev,const struct hid_device_id * id)2147 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2148 		const struct hid_device_id *id)
2149 {
2150 	for (; id->bus; id++)
2151 		if (hid_match_one_id(hdev, id))
2152 			return id;
2153 
2154 	return NULL;
2155 }
2156 EXPORT_SYMBOL_GPL(hid_match_id);
2157 
2158 static const struct hid_device_id hid_hiddev_list[] = {
2159 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2160 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2161 	{ }
2162 };
2163 
hid_hiddev(struct hid_device * hdev)2164 static bool hid_hiddev(struct hid_device *hdev)
2165 {
2166 	return !!hid_match_id(hdev, hid_hiddev_list);
2167 }
2168 
2169 
2170 static ssize_t
read_report_descriptor(struct file * filp,struct kobject * kobj,struct bin_attribute * attr,char * buf,loff_t off,size_t count)2171 read_report_descriptor(struct file *filp, struct kobject *kobj,
2172 		struct bin_attribute *attr,
2173 		char *buf, loff_t off, size_t count)
2174 {
2175 	struct device *dev = kobj_to_dev(kobj);
2176 	struct hid_device *hdev = to_hid_device(dev);
2177 
2178 	if (off >= hdev->rsize)
2179 		return 0;
2180 
2181 	if (off + count > hdev->rsize)
2182 		count = hdev->rsize - off;
2183 
2184 	memcpy(buf, hdev->rdesc + off, count);
2185 
2186 	return count;
2187 }
2188 
2189 static ssize_t
show_country(struct device * dev,struct device_attribute * attr,char * buf)2190 show_country(struct device *dev, struct device_attribute *attr,
2191 		char *buf)
2192 {
2193 	struct hid_device *hdev = to_hid_device(dev);
2194 
2195 	return sprintf(buf, "%02x\n", hdev->country & 0xff);
2196 }
2197 
2198 static struct bin_attribute dev_bin_attr_report_desc = {
2199 	.attr = { .name = "report_descriptor", .mode = 0444 },
2200 	.read = read_report_descriptor,
2201 	.size = HID_MAX_DESCRIPTOR_SIZE,
2202 };
2203 
2204 static const struct device_attribute dev_attr_country = {
2205 	.attr = { .name = "country", .mode = 0444 },
2206 	.show = show_country,
2207 };
2208 
hid_connect(struct hid_device * hdev,unsigned int connect_mask)2209 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2210 {
2211 	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2212 		"Joystick", "Gamepad", "Keyboard", "Keypad",
2213 		"Multi-Axis Controller"
2214 	};
2215 	const char *type, *bus;
2216 	char buf[64] = "";
2217 	unsigned int i;
2218 	int len;
2219 	int ret;
2220 
2221 	ret = hid_bpf_connect_device(hdev);
2222 	if (ret)
2223 		return ret;
2224 
2225 	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2226 		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2227 	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2228 		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2229 	if (hdev->bus != BUS_USB)
2230 		connect_mask &= ~HID_CONNECT_HIDDEV;
2231 	if (hid_hiddev(hdev))
2232 		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2233 
2234 	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2235 				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2236 		hdev->claimed |= HID_CLAIMED_INPUT;
2237 
2238 	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2239 			!hdev->hiddev_connect(hdev,
2240 				connect_mask & HID_CONNECT_HIDDEV_FORCE))
2241 		hdev->claimed |= HID_CLAIMED_HIDDEV;
2242 	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2243 		hdev->claimed |= HID_CLAIMED_HIDRAW;
2244 
2245 	if (connect_mask & HID_CONNECT_DRIVER)
2246 		hdev->claimed |= HID_CLAIMED_DRIVER;
2247 
2248 	/* Drivers with the ->raw_event callback set are not required to connect
2249 	 * to any other listener. */
2250 	if (!hdev->claimed && !hdev->driver->raw_event) {
2251 		hid_err(hdev, "device has no listeners, quitting\n");
2252 		return -ENODEV;
2253 	}
2254 
2255 	hid_process_ordering(hdev);
2256 
2257 	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2258 			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2259 		hdev->ff_init(hdev);
2260 
2261 	len = 0;
2262 	if (hdev->claimed & HID_CLAIMED_INPUT)
2263 		len += sprintf(buf + len, "input");
2264 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2265 		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2266 				((struct hiddev *)hdev->hiddev)->minor);
2267 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2268 		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2269 				((struct hidraw *)hdev->hidraw)->minor);
2270 
2271 	type = "Device";
2272 	for (i = 0; i < hdev->maxcollection; i++) {
2273 		struct hid_collection *col = &hdev->collection[i];
2274 		if (col->type == HID_COLLECTION_APPLICATION &&
2275 		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2276 		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2277 			type = types[col->usage & 0xffff];
2278 			break;
2279 		}
2280 	}
2281 
2282 	switch (hdev->bus) {
2283 	case BUS_USB:
2284 		bus = "USB";
2285 		break;
2286 	case BUS_BLUETOOTH:
2287 		bus = "BLUETOOTH";
2288 		break;
2289 	case BUS_I2C:
2290 		bus = "I2C";
2291 		break;
2292 	case BUS_VIRTUAL:
2293 		bus = "VIRTUAL";
2294 		break;
2295 	case BUS_INTEL_ISHTP:
2296 	case BUS_AMD_SFH:
2297 		bus = "SENSOR HUB";
2298 		break;
2299 	default:
2300 		bus = "<UNKNOWN>";
2301 	}
2302 
2303 	ret = device_create_file(&hdev->dev, &dev_attr_country);
2304 	if (ret)
2305 		hid_warn(hdev,
2306 			 "can't create sysfs country code attribute err: %d\n", ret);
2307 
2308 	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2309 		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2310 		 type, hdev->name, hdev->phys);
2311 
2312 	return 0;
2313 }
2314 EXPORT_SYMBOL_GPL(hid_connect);
2315 
hid_disconnect(struct hid_device * hdev)2316 void hid_disconnect(struct hid_device *hdev)
2317 {
2318 	device_remove_file(&hdev->dev, &dev_attr_country);
2319 	if (hdev->claimed & HID_CLAIMED_INPUT)
2320 		hidinput_disconnect(hdev);
2321 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2322 		hdev->hiddev_disconnect(hdev);
2323 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2324 		hidraw_disconnect(hdev);
2325 	hdev->claimed = 0;
2326 
2327 	hid_bpf_disconnect_device(hdev);
2328 }
2329 EXPORT_SYMBOL_GPL(hid_disconnect);
2330 
2331 /**
2332  * hid_hw_start - start underlying HW
2333  * @hdev: hid device
2334  * @connect_mask: which outputs to connect, see HID_CONNECT_*
2335  *
2336  * Call this in probe function *after* hid_parse. This will setup HW
2337  * buffers and start the device (if not defeirred to device open).
2338  * hid_hw_stop must be called if this was successful.
2339  */
hid_hw_start(struct hid_device * hdev,unsigned int connect_mask)2340 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2341 {
2342 	int error;
2343 
2344 	error = hdev->ll_driver->start(hdev);
2345 	if (error)
2346 		return error;
2347 
2348 	if (connect_mask) {
2349 		error = hid_connect(hdev, connect_mask);
2350 		if (error) {
2351 			hdev->ll_driver->stop(hdev);
2352 			return error;
2353 		}
2354 	}
2355 
2356 	return 0;
2357 }
2358 EXPORT_SYMBOL_GPL(hid_hw_start);
2359 
2360 /**
2361  * hid_hw_stop - stop underlying HW
2362  * @hdev: hid device
2363  *
2364  * This is usually called from remove function or from probe when something
2365  * failed and hid_hw_start was called already.
2366  */
hid_hw_stop(struct hid_device * hdev)2367 void hid_hw_stop(struct hid_device *hdev)
2368 {
2369 	hid_disconnect(hdev);
2370 	hdev->ll_driver->stop(hdev);
2371 }
2372 EXPORT_SYMBOL_GPL(hid_hw_stop);
2373 
2374 /**
2375  * hid_hw_open - signal underlying HW to start delivering events
2376  * @hdev: hid device
2377  *
2378  * Tell underlying HW to start delivering events from the device.
2379  * This function should be called sometime after successful call
2380  * to hid_hw_start().
2381  */
hid_hw_open(struct hid_device * hdev)2382 int hid_hw_open(struct hid_device *hdev)
2383 {
2384 	int ret;
2385 
2386 	ret = mutex_lock_killable(&hdev->ll_open_lock);
2387 	if (ret)
2388 		return ret;
2389 
2390 	if (!hdev->ll_open_count++) {
2391 		ret = hdev->ll_driver->open(hdev);
2392 		if (ret)
2393 			hdev->ll_open_count--;
2394 	}
2395 
2396 	mutex_unlock(&hdev->ll_open_lock);
2397 	return ret;
2398 }
2399 EXPORT_SYMBOL_GPL(hid_hw_open);
2400 
2401 /**
2402  * hid_hw_close - signal underlaying HW to stop delivering events
2403  *
2404  * @hdev: hid device
2405  *
2406  * This function indicates that we are not interested in the events
2407  * from this device anymore. Delivery of events may or may not stop,
2408  * depending on the number of users still outstanding.
2409  */
hid_hw_close(struct hid_device * hdev)2410 void hid_hw_close(struct hid_device *hdev)
2411 {
2412 	mutex_lock(&hdev->ll_open_lock);
2413 	if (!--hdev->ll_open_count)
2414 		hdev->ll_driver->close(hdev);
2415 	mutex_unlock(&hdev->ll_open_lock);
2416 }
2417 EXPORT_SYMBOL_GPL(hid_hw_close);
2418 
2419 /**
2420  * hid_hw_request - send report request to device
2421  *
2422  * @hdev: hid device
2423  * @report: report to send
2424  * @reqtype: hid request type
2425  */
hid_hw_request(struct hid_device * hdev,struct hid_report * report,enum hid_class_request reqtype)2426 void hid_hw_request(struct hid_device *hdev,
2427 		    struct hid_report *report, enum hid_class_request reqtype)
2428 {
2429 	if (hdev->ll_driver->request)
2430 		return hdev->ll_driver->request(hdev, report, reqtype);
2431 
2432 	__hid_request(hdev, report, reqtype);
2433 }
2434 EXPORT_SYMBOL_GPL(hid_hw_request);
2435 
__hid_hw_raw_request(struct hid_device * hdev,unsigned char reportnum,__u8 * buf,size_t len,enum hid_report_type rtype,enum hid_class_request reqtype,u64 source,bool from_bpf)2436 int __hid_hw_raw_request(struct hid_device *hdev,
2437 			 unsigned char reportnum, __u8 *buf,
2438 			 size_t len, enum hid_report_type rtype,
2439 			 enum hid_class_request reqtype,
2440 			 u64 source, bool from_bpf)
2441 {
2442 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2443 	int ret;
2444 
2445 	if (hdev->ll_driver->max_buffer_size)
2446 		max_buffer_size = hdev->ll_driver->max_buffer_size;
2447 
2448 	if (len < 1 || len > max_buffer_size || !buf)
2449 		return -EINVAL;
2450 
2451 	ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2452 					    reqtype, source, from_bpf);
2453 	if (ret)
2454 		return ret;
2455 
2456 	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2457 					    rtype, reqtype);
2458 }
2459 
2460 /**
2461  * hid_hw_raw_request - send report request to device
2462  *
2463  * @hdev: hid device
2464  * @reportnum: report ID
2465  * @buf: in/out data to transfer
2466  * @len: length of buf
2467  * @rtype: HID report type
2468  * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2469  *
2470  * Return: count of data transferred, negative if error
2471  *
2472  * Same behavior as hid_hw_request, but with raw buffers instead.
2473  */
hid_hw_raw_request(struct hid_device * hdev,unsigned char reportnum,__u8 * buf,size_t len,enum hid_report_type rtype,enum hid_class_request reqtype)2474 int hid_hw_raw_request(struct hid_device *hdev,
2475 		       unsigned char reportnum, __u8 *buf,
2476 		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2477 {
2478 	return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2479 }
2480 EXPORT_SYMBOL_GPL(hid_hw_raw_request);
2481 
__hid_hw_output_report(struct hid_device * hdev,__u8 * buf,size_t len,u64 source,bool from_bpf)2482 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2483 			   bool from_bpf)
2484 {
2485 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2486 	int ret;
2487 
2488 	if (hdev->ll_driver->max_buffer_size)
2489 		max_buffer_size = hdev->ll_driver->max_buffer_size;
2490 
2491 	if (len < 1 || len > max_buffer_size || !buf)
2492 		return -EINVAL;
2493 
2494 	ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2495 	if (ret)
2496 		return ret;
2497 
2498 	if (hdev->ll_driver->output_report)
2499 		return hdev->ll_driver->output_report(hdev, buf, len);
2500 
2501 	return -ENOSYS;
2502 }
2503 
2504 /**
2505  * hid_hw_output_report - send output report to device
2506  *
2507  * @hdev: hid device
2508  * @buf: raw data to transfer
2509  * @len: length of buf
2510  *
2511  * Return: count of data transferred, negative if error
2512  */
hid_hw_output_report(struct hid_device * hdev,__u8 * buf,size_t len)2513 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2514 {
2515 	return __hid_hw_output_report(hdev, buf, len, 0, false);
2516 }
2517 EXPORT_SYMBOL_GPL(hid_hw_output_report);
2518 
2519 #ifdef CONFIG_PM
hid_driver_suspend(struct hid_device * hdev,pm_message_t state)2520 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2521 {
2522 	if (hdev->driver && hdev->driver->suspend)
2523 		return hdev->driver->suspend(hdev, state);
2524 
2525 	return 0;
2526 }
2527 EXPORT_SYMBOL_GPL(hid_driver_suspend);
2528 
hid_driver_reset_resume(struct hid_device * hdev)2529 int hid_driver_reset_resume(struct hid_device *hdev)
2530 {
2531 	if (hdev->driver && hdev->driver->reset_resume)
2532 		return hdev->driver->reset_resume(hdev);
2533 
2534 	return 0;
2535 }
2536 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2537 
hid_driver_resume(struct hid_device * hdev)2538 int hid_driver_resume(struct hid_device *hdev)
2539 {
2540 	if (hdev->driver && hdev->driver->resume)
2541 		return hdev->driver->resume(hdev);
2542 
2543 	return 0;
2544 }
2545 EXPORT_SYMBOL_GPL(hid_driver_resume);
2546 #endif /* CONFIG_PM */
2547 
2548 struct hid_dynid {
2549 	struct list_head list;
2550 	struct hid_device_id id;
2551 };
2552 
2553 /**
2554  * new_id_store - add a new HID device ID to this driver and re-probe devices
2555  * @drv: target device driver
2556  * @buf: buffer for scanning device ID data
2557  * @count: input size
2558  *
2559  * Adds a new dynamic hid device ID to this driver,
2560  * and causes the driver to probe for all devices again.
2561  */
new_id_store(struct device_driver * drv,const char * buf,size_t count)2562 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2563 		size_t count)
2564 {
2565 	struct hid_driver *hdrv = to_hid_driver(drv);
2566 	struct hid_dynid *dynid;
2567 	__u32 bus, vendor, product;
2568 	unsigned long driver_data = 0;
2569 	int ret;
2570 
2571 	ret = sscanf(buf, "%x %x %x %lx",
2572 			&bus, &vendor, &product, &driver_data);
2573 	if (ret < 3)
2574 		return -EINVAL;
2575 
2576 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2577 	if (!dynid)
2578 		return -ENOMEM;
2579 
2580 	dynid->id.bus = bus;
2581 	dynid->id.group = HID_GROUP_ANY;
2582 	dynid->id.vendor = vendor;
2583 	dynid->id.product = product;
2584 	dynid->id.driver_data = driver_data;
2585 
2586 	spin_lock(&hdrv->dyn_lock);
2587 	list_add_tail(&dynid->list, &hdrv->dyn_list);
2588 	spin_unlock(&hdrv->dyn_lock);
2589 
2590 	ret = driver_attach(&hdrv->driver);
2591 
2592 	return ret ? : count;
2593 }
2594 static DRIVER_ATTR_WO(new_id);
2595 
2596 static struct attribute *hid_drv_attrs[] = {
2597 	&driver_attr_new_id.attr,
2598 	NULL,
2599 };
2600 ATTRIBUTE_GROUPS(hid_drv);
2601 
hid_free_dynids(struct hid_driver * hdrv)2602 static void hid_free_dynids(struct hid_driver *hdrv)
2603 {
2604 	struct hid_dynid *dynid, *n;
2605 
2606 	spin_lock(&hdrv->dyn_lock);
2607 	list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2608 		list_del(&dynid->list);
2609 		kfree(dynid);
2610 	}
2611 	spin_unlock(&hdrv->dyn_lock);
2612 }
2613 
hid_match_device(struct hid_device * hdev,struct hid_driver * hdrv)2614 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2615 					     struct hid_driver *hdrv)
2616 {
2617 	struct hid_dynid *dynid;
2618 
2619 	spin_lock(&hdrv->dyn_lock);
2620 	list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2621 		if (hid_match_one_id(hdev, &dynid->id)) {
2622 			spin_unlock(&hdrv->dyn_lock);
2623 			return &dynid->id;
2624 		}
2625 	}
2626 	spin_unlock(&hdrv->dyn_lock);
2627 
2628 	return hid_match_id(hdev, hdrv->id_table);
2629 }
2630 EXPORT_SYMBOL_GPL(hid_match_device);
2631 
hid_bus_match(struct device * dev,const struct device_driver * drv)2632 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2633 {
2634 	struct hid_driver *hdrv = to_hid_driver(drv);
2635 	struct hid_device *hdev = to_hid_device(dev);
2636 
2637 	return hid_match_device(hdev, hdrv) != NULL;
2638 }
2639 
2640 /**
2641  * hid_compare_device_paths - check if both devices share the same path
2642  * @hdev_a: hid device
2643  * @hdev_b: hid device
2644  * @separator: char to use as separator
2645  *
2646  * Check if two devices share the same path up to the last occurrence of
2647  * the separator char. Both paths must exist (i.e., zero-length paths
2648  * don't match).
2649  */
hid_compare_device_paths(struct hid_device * hdev_a,struct hid_device * hdev_b,char separator)2650 bool hid_compare_device_paths(struct hid_device *hdev_a,
2651 			      struct hid_device *hdev_b, char separator)
2652 {
2653 	int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2654 	int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2655 
2656 	if (n1 != n2 || n1 <= 0 || n2 <= 0)
2657 		return false;
2658 
2659 	return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2660 }
2661 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2662 
hid_check_device_match(struct hid_device * hdev,struct hid_driver * hdrv,const struct hid_device_id ** id)2663 static bool hid_check_device_match(struct hid_device *hdev,
2664 				   struct hid_driver *hdrv,
2665 				   const struct hid_device_id **id)
2666 {
2667 	*id = hid_match_device(hdev, hdrv);
2668 	if (!*id)
2669 		return false;
2670 
2671 	if (hdrv->match)
2672 		return hdrv->match(hdev, hid_ignore_special_drivers);
2673 
2674 	/*
2675 	 * hid-generic implements .match(), so we must be dealing with a
2676 	 * different HID driver here, and can simply check if
2677 	 * hid_ignore_special_drivers is set or not.
2678 	 */
2679 	return !hid_ignore_special_drivers;
2680 }
2681 
__hid_device_probe(struct hid_device * hdev,struct hid_driver * hdrv)2682 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2683 {
2684 	const struct hid_device_id *id;
2685 	int ret;
2686 
2687 	if (!hid_check_device_match(hdev, hdrv, &id))
2688 		return -ENODEV;
2689 
2690 	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2691 	if (!hdev->devres_group_id)
2692 		return -ENOMEM;
2693 
2694 	/* reset the quirks that has been previously set */
2695 	hdev->quirks = hid_lookup_quirk(hdev);
2696 	hdev->driver = hdrv;
2697 
2698 	if (hdrv->probe) {
2699 		ret = hdrv->probe(hdev, id);
2700 	} else { /* default probe */
2701 		ret = hid_open_report(hdev);
2702 		if (!ret)
2703 			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2704 	}
2705 
2706 	/*
2707 	 * Note that we are not closing the devres group opened above so
2708 	 * even resources that were attached to the device after probe is
2709 	 * run are released when hid_device_remove() is executed. This is
2710 	 * needed as some drivers would allocate additional resources,
2711 	 * for example when updating firmware.
2712 	 */
2713 
2714 	if (ret) {
2715 		devres_release_group(&hdev->dev, hdev->devres_group_id);
2716 		hid_close_report(hdev);
2717 		hdev->driver = NULL;
2718 	}
2719 
2720 	return ret;
2721 }
2722 
hid_device_probe(struct device * dev)2723 static int hid_device_probe(struct device *dev)
2724 {
2725 	struct hid_device *hdev = to_hid_device(dev);
2726 	struct hid_driver *hdrv = to_hid_driver(dev->driver);
2727 	int ret = 0;
2728 
2729 	if (down_interruptible(&hdev->driver_input_lock))
2730 		return -EINTR;
2731 
2732 	hdev->io_started = false;
2733 	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2734 
2735 	if (!hdev->driver)
2736 		ret = __hid_device_probe(hdev, hdrv);
2737 
2738 	if (!hdev->io_started)
2739 		up(&hdev->driver_input_lock);
2740 
2741 	return ret;
2742 }
2743 
hid_device_remove(struct device * dev)2744 static void hid_device_remove(struct device *dev)
2745 {
2746 	struct hid_device *hdev = to_hid_device(dev);
2747 	struct hid_driver *hdrv;
2748 
2749 	down(&hdev->driver_input_lock);
2750 	hdev->io_started = false;
2751 
2752 	hdrv = hdev->driver;
2753 	if (hdrv) {
2754 		if (hdrv->remove)
2755 			hdrv->remove(hdev);
2756 		else /* default remove */
2757 			hid_hw_stop(hdev);
2758 
2759 		/* Release all devres resources allocated by the driver */
2760 		devres_release_group(&hdev->dev, hdev->devres_group_id);
2761 
2762 		hid_close_report(hdev);
2763 		hdev->driver = NULL;
2764 	}
2765 
2766 	if (!hdev->io_started)
2767 		up(&hdev->driver_input_lock);
2768 }
2769 
modalias_show(struct device * dev,struct device_attribute * a,char * buf)2770 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2771 			     char *buf)
2772 {
2773 	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2774 
2775 	return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2776 			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2777 }
2778 static DEVICE_ATTR_RO(modalias);
2779 
2780 static struct attribute *hid_dev_attrs[] = {
2781 	&dev_attr_modalias.attr,
2782 	NULL,
2783 };
2784 static struct bin_attribute *hid_dev_bin_attrs[] = {
2785 	&dev_bin_attr_report_desc,
2786 	NULL
2787 };
2788 static const struct attribute_group hid_dev_group = {
2789 	.attrs = hid_dev_attrs,
2790 	.bin_attrs = hid_dev_bin_attrs,
2791 };
2792 __ATTRIBUTE_GROUPS(hid_dev);
2793 
hid_uevent(const struct device * dev,struct kobj_uevent_env * env)2794 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2795 {
2796 	const struct hid_device *hdev = to_hid_device(dev);
2797 
2798 	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2799 			hdev->bus, hdev->vendor, hdev->product))
2800 		return -ENOMEM;
2801 
2802 	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2803 		return -ENOMEM;
2804 
2805 	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2806 		return -ENOMEM;
2807 
2808 	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2809 		return -ENOMEM;
2810 
2811 	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2812 			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
2813 		return -ENOMEM;
2814 
2815 	return 0;
2816 }
2817 
2818 const struct bus_type hid_bus_type = {
2819 	.name		= "hid",
2820 	.dev_groups	= hid_dev_groups,
2821 	.drv_groups	= hid_drv_groups,
2822 	.match		= hid_bus_match,
2823 	.probe		= hid_device_probe,
2824 	.remove		= hid_device_remove,
2825 	.uevent		= hid_uevent,
2826 };
2827 EXPORT_SYMBOL(hid_bus_type);
2828 
hid_add_device(struct hid_device * hdev)2829 int hid_add_device(struct hid_device *hdev)
2830 {
2831 	static atomic_t id = ATOMIC_INIT(0);
2832 	int ret;
2833 
2834 	if (WARN_ON(hdev->status & HID_STAT_ADDED))
2835 		return -EBUSY;
2836 
2837 	hdev->quirks = hid_lookup_quirk(hdev);
2838 
2839 	/* we need to kill them here, otherwise they will stay allocated to
2840 	 * wait for coming driver */
2841 	if (hid_ignore(hdev))
2842 		return -ENODEV;
2843 
2844 	/*
2845 	 * Check for the mandatory transport channel.
2846 	 */
2847 	 if (!hdev->ll_driver->raw_request) {
2848 		hid_err(hdev, "transport driver missing .raw_request()\n");
2849 		return -EINVAL;
2850 	 }
2851 
2852 	/*
2853 	 * Read the device report descriptor once and use as template
2854 	 * for the driver-specific modifications.
2855 	 */
2856 	ret = hdev->ll_driver->parse(hdev);
2857 	if (ret)
2858 		return ret;
2859 	if (!hdev->dev_rdesc)
2860 		return -ENODEV;
2861 
2862 	/*
2863 	 * Scan generic devices for group information
2864 	 */
2865 	if (hid_ignore_special_drivers) {
2866 		hdev->group = HID_GROUP_GENERIC;
2867 	} else if (!hdev->group &&
2868 		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2869 		ret = hid_scan_report(hdev);
2870 		if (ret)
2871 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2872 	}
2873 
2874 	hdev->id = atomic_inc_return(&id);
2875 
2876 	/* XXX hack, any other cleaner solution after the driver core
2877 	 * is converted to allow more than 20 bytes as the device name? */
2878 	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2879 		     hdev->vendor, hdev->product, hdev->id);
2880 
2881 	hid_debug_register(hdev, dev_name(&hdev->dev));
2882 	ret = device_add(&hdev->dev);
2883 	if (!ret)
2884 		hdev->status |= HID_STAT_ADDED;
2885 	else
2886 		hid_debug_unregister(hdev);
2887 
2888 	return ret;
2889 }
2890 EXPORT_SYMBOL_GPL(hid_add_device);
2891 
2892 /**
2893  * hid_allocate_device - allocate new hid device descriptor
2894  *
2895  * Allocate and initialize hid device, so that hid_destroy_device might be
2896  * used to free it.
2897  *
2898  * New hid_device pointer is returned on success, otherwise ERR_PTR encoded
2899  * error value.
2900  */
hid_allocate_device(void)2901 struct hid_device *hid_allocate_device(void)
2902 {
2903 	struct hid_device *hdev;
2904 	int ret = -ENOMEM;
2905 
2906 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2907 	if (hdev == NULL)
2908 		return ERR_PTR(ret);
2909 
2910 	device_initialize(&hdev->dev);
2911 	hdev->dev.release = hid_device_release;
2912 	hdev->dev.bus = &hid_bus_type;
2913 	device_enable_async_suspend(&hdev->dev);
2914 
2915 	hid_close_report(hdev);
2916 
2917 	init_waitqueue_head(&hdev->debug_wait);
2918 	INIT_LIST_HEAD(&hdev->debug_list);
2919 	spin_lock_init(&hdev->debug_list_lock);
2920 	sema_init(&hdev->driver_input_lock, 1);
2921 	mutex_init(&hdev->ll_open_lock);
2922 	kref_init(&hdev->ref);
2923 
2924 	ret = hid_bpf_device_init(hdev);
2925 	if (ret)
2926 		goto out_err;
2927 
2928 	return hdev;
2929 
2930 out_err:
2931 	hid_destroy_device(hdev);
2932 	return ERR_PTR(ret);
2933 }
2934 EXPORT_SYMBOL_GPL(hid_allocate_device);
2935 
hid_remove_device(struct hid_device * hdev)2936 static void hid_remove_device(struct hid_device *hdev)
2937 {
2938 	if (hdev->status & HID_STAT_ADDED) {
2939 		device_del(&hdev->dev);
2940 		hid_debug_unregister(hdev);
2941 		hdev->status &= ~HID_STAT_ADDED;
2942 	}
2943 	kfree(hdev->dev_rdesc);
2944 	hdev->dev_rdesc = NULL;
2945 	hdev->dev_rsize = 0;
2946 }
2947 
2948 /**
2949  * hid_destroy_device - free previously allocated device
2950  *
2951  * @hdev: hid device
2952  *
2953  * If you allocate hid_device through hid_allocate_device, you should ever
2954  * free by this function.
2955  */
hid_destroy_device(struct hid_device * hdev)2956 void hid_destroy_device(struct hid_device *hdev)
2957 {
2958 	hid_bpf_destroy_device(hdev);
2959 	hid_remove_device(hdev);
2960 	put_device(&hdev->dev);
2961 }
2962 EXPORT_SYMBOL_GPL(hid_destroy_device);
2963 
2964 
__hid_bus_reprobe_drivers(struct device * dev,void * data)2965 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2966 {
2967 	struct hid_driver *hdrv = data;
2968 	struct hid_device *hdev = to_hid_device(dev);
2969 
2970 	if (hdev->driver == hdrv &&
2971 	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
2972 	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2973 		return device_reprobe(dev);
2974 
2975 	return 0;
2976 }
2977 
__hid_bus_driver_added(struct device_driver * drv,void * data)2978 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
2979 {
2980 	struct hid_driver *hdrv = to_hid_driver(drv);
2981 
2982 	if (hdrv->match) {
2983 		bus_for_each_dev(&hid_bus_type, NULL, hdrv,
2984 				 __hid_bus_reprobe_drivers);
2985 	}
2986 
2987 	return 0;
2988 }
2989 
__bus_removed_driver(struct device_driver * drv,void * data)2990 static int __bus_removed_driver(struct device_driver *drv, void *data)
2991 {
2992 	return bus_rescan_devices(&hid_bus_type);
2993 }
2994 
__hid_register_driver(struct hid_driver * hdrv,struct module * owner,const char * mod_name)2995 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
2996 		const char *mod_name)
2997 {
2998 	int ret;
2999 
3000 	hdrv->driver.name = hdrv->name;
3001 	hdrv->driver.bus = &hid_bus_type;
3002 	hdrv->driver.owner = owner;
3003 	hdrv->driver.mod_name = mod_name;
3004 
3005 	INIT_LIST_HEAD(&hdrv->dyn_list);
3006 	spin_lock_init(&hdrv->dyn_lock);
3007 
3008 	ret = driver_register(&hdrv->driver);
3009 
3010 	if (ret == 0)
3011 		bus_for_each_drv(&hid_bus_type, NULL, NULL,
3012 				 __hid_bus_driver_added);
3013 
3014 	return ret;
3015 }
3016 EXPORT_SYMBOL_GPL(__hid_register_driver);
3017 
hid_unregister_driver(struct hid_driver * hdrv)3018 void hid_unregister_driver(struct hid_driver *hdrv)
3019 {
3020 	driver_unregister(&hdrv->driver);
3021 	hid_free_dynids(hdrv);
3022 
3023 	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3024 }
3025 EXPORT_SYMBOL_GPL(hid_unregister_driver);
3026 
hid_check_keys_pressed(struct hid_device * hid)3027 int hid_check_keys_pressed(struct hid_device *hid)
3028 {
3029 	struct hid_input *hidinput;
3030 	int i;
3031 
3032 	if (!(hid->claimed & HID_CLAIMED_INPUT))
3033 		return 0;
3034 
3035 	list_for_each_entry(hidinput, &hid->inputs, list) {
3036 		for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3037 			if (hidinput->input->key[i])
3038 				return 1;
3039 	}
3040 
3041 	return 0;
3042 }
3043 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3044 
3045 #ifdef CONFIG_HID_BPF
3046 static struct hid_ops __hid_ops = {
3047 	.hid_get_report = hid_get_report,
3048 	.hid_hw_raw_request = __hid_hw_raw_request,
3049 	.hid_hw_output_report = __hid_hw_output_report,
3050 	.hid_input_report = __hid_input_report,
3051 	.owner = THIS_MODULE,
3052 	.bus_type = &hid_bus_type,
3053 };
3054 #endif
3055 
hid_init(void)3056 static int __init hid_init(void)
3057 {
3058 	int ret;
3059 
3060 	ret = bus_register(&hid_bus_type);
3061 	if (ret) {
3062 		pr_err("can't register hid bus\n");
3063 		goto err;
3064 	}
3065 
3066 #ifdef CONFIG_HID_BPF
3067 	hid_ops = &__hid_ops;
3068 #endif
3069 
3070 	ret = hidraw_init();
3071 	if (ret)
3072 		goto err_bus;
3073 
3074 	hid_debug_init();
3075 
3076 	return 0;
3077 err_bus:
3078 	bus_unregister(&hid_bus_type);
3079 err:
3080 	return ret;
3081 }
3082 
hid_exit(void)3083 static void __exit hid_exit(void)
3084 {
3085 #ifdef CONFIG_HID_BPF
3086 	hid_ops = NULL;
3087 #endif
3088 	hid_debug_exit();
3089 	hidraw_exit();
3090 	bus_unregister(&hid_bus_type);
3091 	hid_quirks_exit(HID_BUS_ANY);
3092 }
3093 
3094 module_init(hid_init);
3095 module_exit(hid_exit);
3096 
3097 MODULE_AUTHOR("Andreas Gal");
3098 MODULE_AUTHOR("Vojtech Pavlik");
3099 MODULE_AUTHOR("Jiri Kosina");
3100 MODULE_DESCRIPTION("HID support for Linux");
3101 MODULE_LICENSE("GPL");
3102