xref: /linux/drivers/hid/hid-core.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  HID support for Linux
4  *
5  *  Copyright (c) 1999 Andreas Gal
6  *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7  *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8  *  Copyright (c) 2006-2012 Jiri Kosina
9  */
10 
11 /*
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30 
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35 
36 #include "hid-ids.h"
37 
38 /*
39  * Version Information
40  */
41 
42 #define DRIVER_DESC "HID core driver"
43 
44 static int hid_ignore_special_drivers = 0;
45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
47 
48 /*
49  * Convert a signed n-bit integer to a signed 32-bit integer.
50  */
51 
52 static s32 snto32(__u32 value, unsigned int n)
53 {
54 	if (!value || !n)
55 		return 0;
56 
57 	if (n > 32)
58 		n = 32;
59 
60 	return sign_extend32(value, n - 1);
61 }
62 
63 /*
64  * Convert a signed 32-bit integer to a signed n-bit integer.
65  */
66 
67 static u32 s32ton(__s32 value, unsigned int n)
68 {
69 	s32 a;
70 
71 	if (!value || !n)
72 		return 0;
73 
74 	a = value >> (n - 1);
75 	if (a && a != -1)
76 		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
77 	return value & ((1 << n) - 1);
78 }
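/*
 * Informal examples for snto32()/s32ton() above (not part of the API),
 * assuming 8-bit fields: snto32(0xff, 8) == -1 and snto32(0x7f, 8) == 127,
 * while going the other way s32ton(-1, 8) == 0xff. Out-of-range values are
 * clamped to the nearest representable bound, e.g. s32ton(300, 8) == 0x7f
 * and s32ton(-300, 8) == 0x80.
 */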
79 
80 /*
81  * Register a new report for a device.
82  */
83 
84 struct hid_report *hid_register_report(struct hid_device *device,
85 				       enum hid_report_type type, unsigned int id,
86 				       unsigned int application)
87 {
88 	struct hid_report_enum *report_enum = device->report_enum + type;
89 	struct hid_report *report;
90 
91 	if (id >= HID_MAX_IDS)
92 		return NULL;
93 	if (report_enum->report_id_hash[id])
94 		return report_enum->report_id_hash[id];
95 
96 	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
97 	if (!report)
98 		return NULL;
99 
100 	if (id != 0)
101 		report_enum->numbered = 1;
102 
103 	report->id = id;
104 	report->type = type;
105 	report->size = 0;
106 	report->device = device;
107 	report->application = application;
108 	report_enum->report_id_hash[id] = report;
109 
110 	list_add_tail(&report->list, &report_enum->report_list);
111 	INIT_LIST_HEAD(&report->field_entry_list);
112 
113 	return report;
114 }
115 EXPORT_SYMBOL_GPL(hid_register_report);
116 
117 /*
118  * Register a new field for this report.
119  */
120 
121 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
122 {
123 	struct hid_field *field;
124 
125 	if (report->maxfield == HID_MAX_FIELDS) {
126 		hid_err(report->device, "too many fields in report\n");
127 		return NULL;
128 	}
129 
130 	field = kvzalloc((sizeof(struct hid_field) +
131 			  usages * sizeof(struct hid_usage) +
132 			  3 * usages * sizeof(unsigned int)), GFP_KERNEL);
133 	if (!field)
134 		return NULL;
135 
136 	field->index = report->maxfield++;
137 	report->field[field->index] = field;
138 	field->usage = (struct hid_usage *)(field + 1);
139 	field->value = (s32 *)(field->usage + usages);
140 	field->new_value = (s32 *)(field->value + usages);
141 	field->usages_priorities = (s32 *)(field->new_value + usages);
142 	field->report = report;
143 
144 	return field;
145 }
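/*
 * Note on the single kvzalloc() in hid_register_field() above: the field
 * struct and its trailing arrays live in one allocation, laid out as
 *
 *	struct hid_field | usage[usages] | value[usages] | new_value[usages] | usages_priorities[usages]
 *
 * which is why hid_free_report() only has to kvfree() the field pointer.
 */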
146 
147 /*
148  * Open a collection. The type/usage is pushed on the stack.
149  */
150 
151 static int open_collection(struct hid_parser *parser, unsigned type)
152 {
153 	struct hid_collection *collection;
154 	unsigned usage;
155 	int collection_index;
156 
157 	usage = parser->local.usage[0];
158 
159 	if (parser->collection_stack_ptr == parser->collection_stack_size) {
160 		unsigned int *collection_stack;
161 		unsigned int new_size = parser->collection_stack_size +
162 					HID_COLLECTION_STACK_SIZE;
163 
164 		collection_stack = krealloc(parser->collection_stack,
165 					    new_size * sizeof(unsigned int),
166 					    GFP_KERNEL);
167 		if (!collection_stack)
168 			return -ENOMEM;
169 
170 		parser->collection_stack = collection_stack;
171 		parser->collection_stack_size = new_size;
172 	}
173 
174 	if (parser->device->maxcollection == parser->device->collection_size) {
175 		collection = kmalloc(
176 				array3_size(sizeof(struct hid_collection),
177 					    parser->device->collection_size,
178 					    2),
179 				GFP_KERNEL);
180 		if (collection == NULL) {
181 			hid_err(parser->device, "failed to reallocate collection array\n");
182 			return -ENOMEM;
183 		}
184 		memcpy(collection, parser->device->collection,
185 			sizeof(struct hid_collection) *
186 			parser->device->collection_size);
187 		memset(collection + parser->device->collection_size, 0,
188 			sizeof(struct hid_collection) *
189 			parser->device->collection_size);
190 		kfree(parser->device->collection);
191 		parser->device->collection = collection;
192 		parser->device->collection_size *= 2;
193 	}
194 
195 	parser->collection_stack[parser->collection_stack_ptr++] =
196 		parser->device->maxcollection;
197 
198 	collection_index = parser->device->maxcollection++;
199 	collection = parser->device->collection + collection_index;
200 	collection->type = type;
201 	collection->usage = usage;
202 	collection->level = parser->collection_stack_ptr - 1;
203 	collection->parent_idx = (collection->level == 0) ? -1 :
204 		parser->collection_stack[collection->level - 1];
205 
206 	if (type == HID_COLLECTION_APPLICATION)
207 		parser->device->maxapplication++;
208 
209 	return 0;
210 }
211 
212 /*
213  * Close a collection.
214  */
215 
216 static int close_collection(struct hid_parser *parser)
217 {
218 	if (!parser->collection_stack_ptr) {
219 		hid_err(parser->device, "collection stack underflow\n");
220 		return -EINVAL;
221 	}
222 	parser->collection_stack_ptr--;
223 	return 0;
224 }
225 
226 /*
227  * Climb up the stack, search for the specified collection type
228  * and return the usage.
229  */
230 
231 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
232 {
233 	struct hid_collection *collection = parser->device->collection;
234 	int n;
235 
236 	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
237 		unsigned index = parser->collection_stack[n];
238 		if (collection[index].type == type)
239 			return collection[index].usage;
240 	}
241 	return 0; /* we know nothing about this usage type */
242 }
243 
244 /*
245  * Concatenate a usage that defines 16 bits or less with the
246  * currently defined usage page to form a 32-bit usage
247  */
248 
249 static void complete_usage(struct hid_parser *parser, unsigned int index)
250 {
251 	parser->local.usage[index] &= 0xFFFF;
252 	parser->local.usage[index] |=
253 		(parser->global.usage_page & 0xFFFF) << 16;
254 }
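/*
 * For example, with the current Usage Page set to Generic Desktop (0x01),
 * a short Usage item of 0x30 (X axis) becomes the 32-bit usage 0x00010030,
 * i.e. HID_GD_X.
 */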
255 
256 /*
257  * Add a usage to the temporary parser table.
258  */
259 
260 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
261 {
262 	if (parser->local.usage_index >= HID_MAX_USAGES) {
263 		hid_err(parser->device, "usage index exceeded\n");
264 		return -1;
265 	}
266 	parser->local.usage[parser->local.usage_index] = usage;
267 
268 	/*
269 	 * If Usage item only includes usage id, concatenate it with
270 	 * currently defined usage page
271 	 */
272 	if (size <= 2)
273 		complete_usage(parser, parser->local.usage_index);
274 
275 	parser->local.usage_size[parser->local.usage_index] = size;
276 	parser->local.collection_index[parser->local.usage_index] =
277 		parser->collection_stack_ptr ?
278 		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
279 	parser->local.usage_index++;
280 	return 0;
281 }
282 
283 /*
284  * Add a new field to the report being parsed (registering the report itself if needed).
285  */
286 
287 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
288 {
289 	struct hid_report *report;
290 	struct hid_field *field;
291 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
292 	unsigned int usages;
293 	unsigned int offset;
294 	unsigned int i;
295 	unsigned int application;
296 
297 	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
298 
299 	report = hid_register_report(parser->device, report_type,
300 				     parser->global.report_id, application);
301 	if (!report) {
302 		hid_err(parser->device, "hid_register_report failed\n");
303 		return -1;
304 	}
305 
306 	/* Handle both signed and unsigned cases properly */
307 	if ((parser->global.logical_minimum < 0 &&
308 		parser->global.logical_maximum <
309 		parser->global.logical_minimum) ||
310 		(parser->global.logical_minimum >= 0 &&
311 		(__u32)parser->global.logical_maximum <
312 		(__u32)parser->global.logical_minimum)) {
313 		dbg_hid("logical range invalid 0x%x 0x%x\n",
314 			parser->global.logical_minimum,
315 			parser->global.logical_maximum);
316 		return -1;
317 	}
318 
319 	offset = report->size;
320 	report->size += parser->global.report_size * parser->global.report_count;
321 
322 	if (parser->device->ll_driver->max_buffer_size)
323 		max_buffer_size = parser->device->ll_driver->max_buffer_size;
324 
325 	/* Total size check: Allow for possible report index byte */
326 	if (report->size > (max_buffer_size - 1) << 3) {
327 		hid_err(parser->device, "report is too long\n");
328 		return -1;
329 	}
330 
331 	if (!parser->local.usage_index) /* Ignore padding fields */
332 		return 0;
333 
334 	usages = max_t(unsigned, parser->local.usage_index,
335 				 parser->global.report_count);
336 
337 	field = hid_register_field(report, usages);
338 	if (!field)
339 		return 0;
340 
341 	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
342 	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
343 	field->application = application;
344 
345 	for (i = 0; i < usages; i++) {
346 		unsigned j = i;
347 		/* Duplicate the last usage we parsed if we have excess values */
348 		if (i >= parser->local.usage_index)
349 			j = parser->local.usage_index - 1;
350 		field->usage[i].hid = parser->local.usage[j];
351 		field->usage[i].collection_index =
352 			parser->local.collection_index[j];
353 		field->usage[i].usage_index = i;
354 		field->usage[i].resolution_multiplier = 1;
355 	}
356 
357 	field->maxusage = usages;
358 	field->flags = flags;
359 	field->report_offset = offset;
360 	field->report_type = report_type;
361 	field->report_size = parser->global.report_size;
362 	field->report_count = parser->global.report_count;
363 	field->logical_minimum = parser->global.logical_minimum;
364 	field->logical_maximum = parser->global.logical_maximum;
365 	field->physical_minimum = parser->global.physical_minimum;
366 	field->physical_maximum = parser->global.physical_maximum;
367 	field->unit_exponent = parser->global.unit_exponent;
368 	field->unit = parser->global.unit;
369 
370 	return 0;
371 }
372 
373 /*
374  * Read data value from item.
375  */
376 
377 static u32 item_udata(struct hid_item *item)
378 {
379 	switch (item->size) {
380 	case 1: return item->data.u8;
381 	case 2: return item->data.u16;
382 	case 4: return item->data.u32;
383 	}
384 	return 0;
385 }
386 
387 static s32 item_sdata(struct hid_item *item)
388 {
389 	switch (item->size) {
390 	case 1: return item->data.s8;
391 	case 2: return item->data.s16;
392 	case 4: return item->data.s32;
393 	}
394 	return 0;
395 }
396 
397 /*
398  * Process a global item.
399  */
400 
401 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
402 {
403 	__s32 raw_value;
404 	switch (item->tag) {
405 	case HID_GLOBAL_ITEM_TAG_PUSH:
406 
407 		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
408 			hid_err(parser->device, "global environment stack overflow\n");
409 			return -1;
410 		}
411 
412 		memcpy(parser->global_stack + parser->global_stack_ptr++,
413 			&parser->global, sizeof(struct hid_global));
414 		return 0;
415 
416 	case HID_GLOBAL_ITEM_TAG_POP:
417 
418 		if (!parser->global_stack_ptr) {
419 			hid_err(parser->device, "global environment stack underflow\n");
420 			return -1;
421 		}
422 
423 		memcpy(&parser->global, parser->global_stack +
424 			--parser->global_stack_ptr, sizeof(struct hid_global));
425 		return 0;
426 
427 	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
428 		parser->global.usage_page = item_udata(item);
429 		return 0;
430 
431 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
432 		parser->global.logical_minimum = item_sdata(item);
433 		return 0;
434 
435 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
436 		if (parser->global.logical_minimum < 0)
437 			parser->global.logical_maximum = item_sdata(item);
438 		else
439 			parser->global.logical_maximum = item_udata(item);
440 		return 0;
441 
442 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
443 		parser->global.physical_minimum = item_sdata(item);
444 		return 0;
445 
446 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
447 		if (parser->global.physical_minimum < 0)
448 			parser->global.physical_maximum = item_sdata(item);
449 		else
450 			parser->global.physical_maximum = item_udata(item);
451 		return 0;
452 
453 	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
454 		/* Many devices provide unit exponent as a two's complement
455 		 * nibble due to the common misunderstanding of HID
456 		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
457 		 * both this and the standard encoding. */
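		/*
		 * For example, a device encoding an exponent of -2 as the
		 * bare nibble 0x0e takes the first branch below
		 * (snto32(0x0e, 4) == -2), while one using the standard
		 * signed-item encoding (a one-byte item of 0xfe, i.e.
		 * item_sdata() == -2) takes the second; both end up with -2.
		 */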
458 		raw_value = item_sdata(item);
459 		if (!(raw_value & 0xfffffff0))
460 			parser->global.unit_exponent = snto32(raw_value, 4);
461 		else
462 			parser->global.unit_exponent = raw_value;
463 		return 0;
464 
465 	case HID_GLOBAL_ITEM_TAG_UNIT:
466 		parser->global.unit = item_udata(item);
467 		return 0;
468 
469 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
470 		parser->global.report_size = item_udata(item);
471 		if (parser->global.report_size > 256) {
472 			hid_err(parser->device, "invalid report_size %d\n",
473 					parser->global.report_size);
474 			return -1;
475 		}
476 		return 0;
477 
478 	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
479 		parser->global.report_count = item_udata(item);
480 		if (parser->global.report_count > HID_MAX_USAGES) {
481 			hid_err(parser->device, "invalid report_count %d\n",
482 					parser->global.report_count);
483 			return -1;
484 		}
485 		return 0;
486 
487 	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
488 		parser->global.report_id = item_udata(item);
489 		if (parser->global.report_id == 0 ||
490 		    parser->global.report_id >= HID_MAX_IDS) {
491 			hid_err(parser->device, "report_id %u is invalid\n",
492 				parser->global.report_id);
493 			return -1;
494 		}
495 		return 0;
496 
497 	default:
498 		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
499 		return -1;
500 	}
501 }
502 
503 /*
504  * Process a local item.
505  */
506 
507 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
508 {
509 	__u32 data;
510 	unsigned n;
511 	__u32 count;
512 
513 	data = item_udata(item);
514 
515 	switch (item->tag) {
516 	case HID_LOCAL_ITEM_TAG_DELIMITER:
517 
518 		if (data) {
519 			/*
520 			 * We treat items before the first delimiter
521 			 * as global to all usage sets (branch 0).
522 			 * For the moment we process only these global
523 			 * items and the first delimiter set.
524 			 */
525 			if (parser->local.delimiter_depth != 0) {
526 				hid_err(parser->device, "nested delimiters\n");
527 				return -1;
528 			}
529 			parser->local.delimiter_depth++;
530 			parser->local.delimiter_branch++;
531 		} else {
532 			if (parser->local.delimiter_depth < 1) {
533 				hid_err(parser->device, "bogus close delimiter\n");
534 				return -1;
535 			}
536 			parser->local.delimiter_depth--;
537 		}
538 		return 0;
539 
540 	case HID_LOCAL_ITEM_TAG_USAGE:
541 
542 		if (parser->local.delimiter_branch > 1) {
543 			dbg_hid("alternative usage ignored\n");
544 			return 0;
545 		}
546 
547 		return hid_add_usage(parser, data, item->size);
548 
549 	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
550 
551 		if (parser->local.delimiter_branch > 1) {
552 			dbg_hid("alternative usage ignored\n");
553 			return 0;
554 		}
555 
556 		parser->local.usage_minimum = data;
557 		return 0;
558 
559 	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
560 
561 		if (parser->local.delimiter_branch > 1) {
562 			dbg_hid("alternative usage ignored\n");
563 			return 0;
564 		}
565 
566 		count = data - parser->local.usage_minimum;
567 		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
568 			/*
569 			 * We do not warn if the name is not set: we are
570 			 * actually pre-scanning the device.
571 			 */
572 			if (dev_name(&parser->device->dev))
573 				hid_warn(parser->device,
574 					 "ignoring exceeding usage max\n");
575 			data = HID_MAX_USAGES - parser->local.usage_index +
576 				parser->local.usage_minimum - 1;
577 			if (data <= 0) {
578 				hid_err(parser->device,
579 					"no more usage index available\n");
580 				return -1;
581 			}
582 		}
583 
584 		for (n = parser->local.usage_minimum; n <= data; n++)
585 			if (hid_add_usage(parser, n, item->size)) {
586 				dbg_hid("hid_add_usage failed\n");
587 				return -1;
588 			}
589 		return 0;
590 
591 	default:
592 
593 		dbg_hid("unknown local item tag 0x%x\n", item->tag);
594 		return 0;
595 	}
596 	return 0;
597 }
598 
599 /*
600  * Concatenate Usage Pages into Usages where relevant:
601  * As per specification, 6.2.2.8: "When the parser encounters a main item it
602  * concatenates the last declared Usage Page with a Usage to form a complete
603  * usage value."
604  */
605 
606 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
607 {
608 	int i;
609 	unsigned int usage_page;
610 	unsigned int current_page;
611 
612 	if (!parser->local.usage_index)
613 		return;
614 
615 	usage_page = parser->global.usage_page;
616 
617 	/*
618 	 * Concatenate usage page again only if last declared Usage Page
619 	 * has not been already used in previous usages concatenation
620 	 */
621 	for (i = parser->local.usage_index - 1; i >= 0; i--) {
622 		if (parser->local.usage_size[i] > 2)
623 			/* Ignore extended usages */
624 			continue;
625 
626 		current_page = parser->local.usage[i] >> 16;
627 		if (current_page == usage_page)
628 			break;
629 
630 		complete_usage(parser, i);
631 	}
632 }
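/*
 * For example, a descriptor fragment that declares the page after the usage,
 * such as
 *	Usage (0x01), Usage Page (Button), Input (...)
 * gets fixed up here when the Input main item is reached: usage 0x0001 is
 * re-concatenated with the Button page (0x09), yielding 0x00090001.
 */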
633 
634 /*
635  * Process a main item.
636  */
637 
638 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
639 {
640 	__u32 data;
641 	int ret;
642 
643 	hid_concatenate_last_usage_page(parser);
644 
645 	data = item_udata(item);
646 
647 	switch (item->tag) {
648 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
649 		ret = open_collection(parser, data & 0xff);
650 		break;
651 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
652 		ret = close_collection(parser);
653 		break;
654 	case HID_MAIN_ITEM_TAG_INPUT:
655 		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
656 		break;
657 	case HID_MAIN_ITEM_TAG_OUTPUT:
658 		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
659 		break;
660 	case HID_MAIN_ITEM_TAG_FEATURE:
661 		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
662 		break;
663 	default:
664 		if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
665 			item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
666 			hid_warn_ratelimited(parser->device, "reserved main item tag 0x%x\n", item->tag);
667 		else
668 			hid_warn_ratelimited(parser->device, "unknown main item tag 0x%x\n", item->tag);
669 		ret = 0;
670 	}
671 
672 	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */
673 
674 	return ret;
675 }
676 
677 /*
678  * Process a reserved item.
679  */
680 
681 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
682 {
683 	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
684 	return 0;
685 }
686 
687 /*
688  * Free a report and all registered fields. The field->usage and
689  * field->value tables are allocated behind the field, so we only
690  * need to kvfree(field) itself.
691  */
692 
693 static void hid_free_report(struct hid_report *report)
694 {
695 	unsigned n;
696 
697 	kfree(report->field_entries);
698 
699 	for (n = 0; n < report->maxfield; n++)
700 		kvfree(report->field[n]);
701 	kfree(report);
702 }
703 
704 /*
705  * Close report. This function returns the device
706  * state to the point prior to hid_open_report().
707  */
708 static void hid_close_report(struct hid_device *device)
709 {
710 	unsigned i, j;
711 
712 	for (i = 0; i < HID_REPORT_TYPES; i++) {
713 		struct hid_report_enum *report_enum = device->report_enum + i;
714 
715 		for (j = 0; j < HID_MAX_IDS; j++) {
716 			struct hid_report *report = report_enum->report_id_hash[j];
717 			if (report)
718 				hid_free_report(report);
719 		}
720 		memset(report_enum, 0, sizeof(*report_enum));
721 		INIT_LIST_HEAD(&report_enum->report_list);
722 	}
723 
724 	/*
725 	 * If the HID driver had a rdesc_fixup() callback, dev->rdesc
726 	 * If the HID driver had a report_fixup() callback, dev->rdesc
727 	 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
728 	 * which cases it'll be freed later on device removal or destroy.
729 	 */
730 	if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
731 		kfree(device->rdesc);
732 	device->rdesc = NULL;
733 	device->rsize = 0;
734 
735 	kfree(device->collection);
736 	device->collection = NULL;
737 	device->collection_size = 0;
738 	device->maxcollection = 0;
739 	device->maxapplication = 0;
740 
741 	device->status &= ~HID_STAT_PARSED;
742 }
743 
744 static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
745 {
746 	/* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
747 	if (hdev->bpf_rdesc != hdev->dev_rdesc)
748 		kfree(hdev->bpf_rdesc);
749 	hdev->bpf_rdesc = NULL;
750 }
751 
752 /*
753  * Free a device structure, all reports, and all fields.
754  */
755 
756 void hiddev_free(struct kref *ref)
757 {
758 	struct hid_device *hid = container_of(ref, struct hid_device, ref);
759 
760 	hid_close_report(hid);
761 	hid_free_bpf_rdesc(hid);
762 	kfree(hid->dev_rdesc);
763 	kfree(hid);
764 }
765 
766 static void hid_device_release(struct device *dev)
767 {
768 	struct hid_device *hid = to_hid_device(dev);
769 
770 	kref_put(&hid->ref, hiddev_free);
771 }
772 
773 /*
774  * Fetch a report description item from the data stream. We support long
775  * items, though they are not used yet.
776  */
777 
778 static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
779 {
780 	u8 b;
781 
782 	if ((end - start) <= 0)
783 		return NULL;
784 
785 	b = *start++;
786 
787 	item->type = (b >> 2) & 3;
788 	item->tag  = (b >> 4) & 15;
789 
790 	if (item->tag == HID_ITEM_TAG_LONG) {
791 
792 		item->format = HID_ITEM_FORMAT_LONG;
793 
794 		if ((end - start) < 2)
795 			return NULL;
796 
797 		item->size = *start++;
798 		item->tag  = *start++;
799 
800 		if ((end - start) < item->size)
801 			return NULL;
802 
803 		item->data.longdata = start;
804 		start += item->size;
805 		return start;
806 	}
807 
808 	item->format = HID_ITEM_FORMAT_SHORT;
809 	item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */
810 
811 	if (end - start < item->size)
812 		return NULL;
813 
814 	switch (item->size) {
815 	case 0:
816 		break;
817 
818 	case 1:
819 		item->data.u8 = *start;
820 		break;
821 
822 	case 2:
823 		item->data.u16 = get_unaligned_le16(start);
824 		break;
825 
826 	case 4:
827 		item->data.u32 = get_unaligned_le32(start);
828 		break;
829 	}
830 
831 	return start + item->size;
832 }
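/*
 * Short-item example for fetch_item(): the byte sequence 0x05 0x01 has a
 * prefix of 0x05, i.e. size 1, type 1 (global) and tag 0 (Usage Page), with
 * one data byte of 0x01 -- the "Usage Page (Generic Desktop)" item that
 * opens most report descriptors.
 */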
833 
834 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
835 {
836 	struct hid_device *hid = parser->device;
837 
838 	if (usage == HID_DG_CONTACTID)
839 		hid->group = HID_GROUP_MULTITOUCH;
840 }
841 
842 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
843 {
844 	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
845 	    parser->global.report_size == 8)
846 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
847 
848 	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
849 	    parser->global.report_size == 8)
850 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
851 }
852 
853 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
854 {
855 	struct hid_device *hid = parser->device;
856 	int i;
857 
858 	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
859 	    (type == HID_COLLECTION_PHYSICAL ||
860 	     type == HID_COLLECTION_APPLICATION))
861 		hid->group = HID_GROUP_SENSOR_HUB;
862 
863 	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
864 	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
865 	    hid->group == HID_GROUP_MULTITOUCH)
866 		hid->group = HID_GROUP_GENERIC;
867 
868 	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
869 		for (i = 0; i < parser->local.usage_index; i++)
870 			if (parser->local.usage[i] == HID_GD_POINTER)
871 				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
872 
873 	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
874 		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
875 
876 	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
877 		for (i = 0; i < parser->local.usage_index; i++)
878 			if (parser->local.usage[i] ==
879 					(HID_UP_GOOGLEVENDOR | 0x0001))
880 				parser->device->group =
881 					HID_GROUP_VIVALDI;
882 }
883 
884 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
885 {
886 	__u32 data;
887 	int i;
888 
889 	hid_concatenate_last_usage_page(parser);
890 
891 	data = item_udata(item);
892 
893 	switch (item->tag) {
894 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
895 		hid_scan_collection(parser, data & 0xff);
896 		break;
897 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
898 		break;
899 	case HID_MAIN_ITEM_TAG_INPUT:
900 		/* skip constant inputs here, hid-input will ignore them as well */
901 		if (data & HID_MAIN_ITEM_CONSTANT)
902 			break;
903 		for (i = 0; i < parser->local.usage_index; i++)
904 			hid_scan_input_usage(parser, parser->local.usage[i]);
905 		break;
906 	case HID_MAIN_ITEM_TAG_OUTPUT:
907 		break;
908 	case HID_MAIN_ITEM_TAG_FEATURE:
909 		for (i = 0; i < parser->local.usage_index; i++)
910 			hid_scan_feature_usage(parser, parser->local.usage[i]);
911 		break;
912 	}
913 
914 	/* Reset the local parser environment */
915 	memset(&parser->local, 0, sizeof(parser->local));
916 
917 	return 0;
918 }
919 
920 /*
921  * Scan a report descriptor before the device is added to the bus.
922  * Sets device groups and other properties that determine what driver
923  * to load.
924  */
925 static int hid_scan_report(struct hid_device *hid)
926 {
927 	struct hid_parser *parser;
928 	struct hid_item item;
929 	const __u8 *start = hid->dev_rdesc;
930 	const __u8 *end = start + hid->dev_rsize;
931 	static int (*dispatch_type[])(struct hid_parser *parser,
932 				      struct hid_item *item) = {
933 		hid_scan_main,
934 		hid_parser_global,
935 		hid_parser_local,
936 		hid_parser_reserved
937 	};
938 
939 	parser = vzalloc(sizeof(struct hid_parser));
940 	if (!parser)
941 		return -ENOMEM;
942 
943 	parser->device = hid;
944 	hid->group = HID_GROUP_GENERIC;
945 
946 	/*
947 	 * In case we are re-scanning after a BPF has been loaded,
948 	 * we need to use the bpf report descriptor, not the original one.
949 	 */
950 	if (hid->bpf_rdesc && hid->bpf_rsize) {
951 		start = hid->bpf_rdesc;
952 		end = start + hid->bpf_rsize;
953 	}
954 
955 	/*
956 	 * The parsing here is simpler than in hid_open_report() as we only
957 	 * need to be robust against malformed descriptors; any real errors
958 	 * will be raised by hid_open_report() anyway.
959 	 */
960 	while ((start = fetch_item(start, end, &item)) != NULL)
961 		dispatch_type[item.type](parser, &item);
962 
963 	/*
964 	 * Handle special flags set during scanning.
965 	 */
966 	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
967 	    (hid->group == HID_GROUP_MULTITOUCH))
968 		hid->group = HID_GROUP_MULTITOUCH_WIN_8;
969 
970 	/*
971 	 * Vendor specific handlings
972 	 */
973 	switch (hid->vendor) {
974 	case USB_VENDOR_ID_WACOM:
975 		hid->group = HID_GROUP_WACOM;
976 		break;
977 	case USB_VENDOR_ID_SYNAPTICS:
978 		if (hid->group == HID_GROUP_GENERIC)
979 			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
980 			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
981 				/*
982 				 * hid-rmi should take care of them,
983 				 * not hid-generic
984 				 */
985 				hid->group = HID_GROUP_RMI;
986 		break;
987 	}
988 
989 	kfree(parser->collection_stack);
990 	vfree(parser);
991 	return 0;
992 }
993 
994 /**
995  * hid_parse_report - parse device report
996  *
997  * @hid: hid device
998  * @start: report start
999  * @size: report size
1000  *
1001  * Allocate the device report as read by the bus driver. This function should
1002  * only be called from parse() in ll drivers.
1003  */
1004 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
1005 {
1006 	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
1007 	if (!hid->dev_rdesc)
1008 		return -ENOMEM;
1009 	hid->dev_rsize = size;
1010 	return 0;
1011 }
1012 EXPORT_SYMBOL_GPL(hid_parse_report);
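/*
 * A low-level transport driver's ->parse() callback usually just hands over
 * the raw descriptor it already read from the hardware. A minimal sketch
 * (struct foo_dev and its members are hypothetical):
 *
 *	static int foo_ll_parse(struct hid_device *hid)
 *	{
 *		struct foo_dev *foo = hid->driver_data;
 *
 *		return hid_parse_report(hid, foo->rdesc, foo->rdesc_size);
 *	}
 */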
1013 
1014 static const char * const hid_report_names[] = {
1015 	"HID_INPUT_REPORT",
1016 	"HID_OUTPUT_REPORT",
1017 	"HID_FEATURE_REPORT",
1018 };
1019 /**
1020  * hid_validate_values - validate existing device report's value indexes
1021  *
1022  * @hid: hid device
1023  * @type: which report type to examine
1024  * @id: which report ID to examine (0 for first)
1025  * @field_index: which report field to examine
1026  * @report_counts: expected number of values
1027  *
1028  * Validate the number of values in a given field of a given report, after
1029  * parsing.
1030  */
1031 struct hid_report *hid_validate_values(struct hid_device *hid,
1032 				       enum hid_report_type type, unsigned int id,
1033 				       unsigned int field_index,
1034 				       unsigned int report_counts)
1035 {
1036 	struct hid_report *report;
1037 
1038 	if (type > HID_FEATURE_REPORT) {
1039 		hid_err(hid, "invalid HID report type %u\n", type);
1040 		return NULL;
1041 	}
1042 
1043 	if (id >= HID_MAX_IDS) {
1044 		hid_err(hid, "invalid HID report id %u\n", id);
1045 		return NULL;
1046 	}
1047 
1048 	/*
1049 	 * Explicitly not using hid_get_report() here since it depends on
1050 	 * ->numbered being checked, which may not always be the case when
1051 	 * drivers go to access report values.
1052 	 */
1053 	if (id == 0) {
1054 		/*
1055 		 * Validating on id 0 means we should examine the first
1056 		 * report in the list.
1057 		 */
1058 		report = list_first_entry_or_null(
1059 				&hid->report_enum[type].report_list,
1060 				struct hid_report, list);
1061 	} else {
1062 		report = hid->report_enum[type].report_id_hash[id];
1063 	}
1064 	if (!report) {
1065 		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1066 		return NULL;
1067 	}
1068 	if (report->maxfield <= field_index) {
1069 		hid_err(hid, "not enough fields in %s %u\n",
1070 			hid_report_names[type], id);
1071 		return NULL;
1072 	}
1073 	if (report->field[field_index]->report_count < report_counts) {
1074 		hid_err(hid, "not enough values in %s %u field %u\n",
1075 			hid_report_names[type], id, field_index);
1076 		return NULL;
1077 	}
1078 	return report;
1079 }
1080 EXPORT_SYMBOL_GPL(hid_validate_values);
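/*
 * Illustrative call from a driver's probe() path (the numbers are made up):
 * a driver that will later write 7 values into the first field of output
 * report 0 can verify this up front with
 *
 *	if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
 *		return -ENODEV;
 */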
1081 
1082 static int hid_calculate_multiplier(struct hid_device *hid,
1083 				     struct hid_field *multiplier)
1084 {
1085 	int m;
1086 	__s32 v = *multiplier->value;
1087 	__s32 lmin = multiplier->logical_minimum;
1088 	__s32 lmax = multiplier->logical_maximum;
1089 	__s32 pmin = multiplier->physical_minimum;
1090 	__s32 pmax = multiplier->physical_maximum;
1091 
1092 	/*
1093 	 * "Because OS implementations will generally divide the control's
1094 	 * reported count by the Effective Resolution Multiplier, designers
1095 	 * should take care not to establish a potential Effective
1096 	 * Resolution Multiplier of zero."
1097 	 * HID Usage Table, v1.12, Section 4.3.1, p31
1098 	 */
1099 	if (lmax - lmin == 0)
1100 		return 1;
1101 	/*
1102 	 * Handling the unit exponent is left as an exercise to whoever
1103 	 * finds a device where that exponent is not 0.
1104 	 */
1105 	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1106 	if (unlikely(multiplier->unit_exponent != 0)) {
1107 		hid_warn(hid,
1108 			 "unsupported Resolution Multiplier unit exponent %d\n",
1109 			 multiplier->unit_exponent);
1110 	}
1111 
1112 	/* There are no devices with an effective multiplier > 255 */
1113 	if (unlikely(m == 0 || m > 255 || m < -255)) {
1114 		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1115 		m = 1;
1116 	}
1117 
1118 	return m;
1119 }
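/*
 * Example for hid_calculate_multiplier(): with the usual logical range 0..1
 * and a physical range of, say, 1..8, a reported value of 0 yields m = 1 and
 * a value of 1 yields m = 8; the integer division above is exact for such
 * 0..1 logical ranges.
 */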
1120 
1121 static void hid_apply_multiplier_to_field(struct hid_device *hid,
1122 					  struct hid_field *field,
1123 					  struct hid_collection *multiplier_collection,
1124 					  int effective_multiplier)
1125 {
1126 	struct hid_collection *collection;
1127 	struct hid_usage *usage;
1128 	int i;
1129 
1130 	/*
1131 	 * If multiplier_collection is NULL, the multiplier applies
1132 	 * to all fields in the report.
1133 	 * Otherwise, it is the Logical Collection the multiplier applies to
1134 	 * but our field may be in a subcollection of that collection.
1135 	 */
1136 	for (i = 0; i < field->maxusage; i++) {
1137 		usage = &field->usage[i];
1138 
1139 		collection = &hid->collection[usage->collection_index];
1140 		while (collection->parent_idx != -1 &&
1141 		       collection != multiplier_collection)
1142 			collection = &hid->collection[collection->parent_idx];
1143 
1144 		if (collection->parent_idx != -1 ||
1145 		    multiplier_collection == NULL)
1146 			usage->resolution_multiplier = effective_multiplier;
1147 
1148 	}
1149 }
1150 
1151 static void hid_apply_multiplier(struct hid_device *hid,
1152 				 struct hid_field *multiplier)
1153 {
1154 	struct hid_report_enum *rep_enum;
1155 	struct hid_report *rep;
1156 	struct hid_field *field;
1157 	struct hid_collection *multiplier_collection;
1158 	int effective_multiplier;
1159 	int i;
1160 
1161 	/*
1162 	 * "The Resolution Multiplier control must be contained in the same
1163 	 * Logical Collection as the control(s) to which it is to be applied.
1164 	 * If no Resolution Multiplier is defined, then the Resolution
1165 	 * Multiplier defaults to 1.  If more than one control exists in a
1166 	 * Logical Collection, the Resolution Multiplier is associated with
1167 	 * all controls in the collection. If no Logical Collection is
1168 	 * defined, the Resolution Multiplier is associated with all
1169 	 * controls in the report."
1170 	 * HID Usage Table, v1.12, Section 4.3.1, p30
1171 	 *
1172 	 * Thus, search from the current collection upwards until we find a
1173 	 * logical collection. Then search all fields for that same parent
1174 	 * collection. Those are the fields the multiplier applies to.
1175 	 *
1176 	 * If we have more than one multiplier, it will overwrite the
1177 	 * applicable fields later.
1178 	 */
1179 	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1180 	while (multiplier_collection->parent_idx != -1 &&
1181 	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
1182 		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1183 	if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
1184 		multiplier_collection = NULL;
1185 
1186 	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1187 
1188 	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1189 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1190 		for (i = 0; i < rep->maxfield; i++) {
1191 			field = rep->field[i];
1192 			hid_apply_multiplier_to_field(hid, field,
1193 						      multiplier_collection,
1194 						      effective_multiplier);
1195 		}
1196 	}
1197 }
1198 
1199 /*
1200  * hid_setup_resolution_multiplier - set up all resolution multipliers
1201  *
1202  * @device: hid device
1203  *
1204  * Search for all Resolution Multiplier Feature Reports and apply their
1205  * value to all matching Input items. This only updates the internal struct
1206  * fields.
1207  *
1208  * The Resolution Multiplier is applied by the hardware. If the multiplier
1209  * is anything other than 1, the hardware will send pre-multiplied events
1210  * so that the same physical interaction generates an accumulated
1211  *	accumulated_value = value * multiplier
1212  * This may be achieved by sending
1213  * - "value * multiplier" for each event, or
1214  * - "value" but "multiplier" times as frequently, or
1215  * - a combination of the above
1216  * The only guarantee is that the same physical interaction always generates
1217  * an accumulated 'value * multiplier'.
1218  *
1219  * This function must be called before any event processing and after
1220  * any SetRequest to the Resolution Multiplier.
1221  */
1222 void hid_setup_resolution_multiplier(struct hid_device *hid)
1223 {
1224 	struct hid_report_enum *rep_enum;
1225 	struct hid_report *rep;
1226 	struct hid_usage *usage;
1227 	int i, j;
1228 
1229 	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1230 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1231 		for (i = 0; i < rep->maxfield; i++) {
1232 			/* Ignore if report count is out of bounds. */
1233 			if (rep->field[i]->report_count < 1)
1234 				continue;
1235 
1236 			for (j = 0; j < rep->field[i]->maxusage; j++) {
1237 				usage = &rep->field[i]->usage[j];
1238 				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1239 					hid_apply_multiplier(hid,
1240 							     rep->field[i]);
1241 			}
1242 		}
1243 	}
1244 }
1245 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1246 
1247 /**
1248  * hid_open_report - open a driver-specific device report
1249  *
1250  * @device: hid device
1251  *
1252  * Parse a report description into a hid_device structure. Reports are
1253  * enumerated, fields are attached to these reports.
1254  * 0 returned on success, otherwise nonzero error value.
1255  *
1256  * This function (or the equivalent hid_parse() macro) should only be
1257  * called from probe() in drivers, before starting the device.
1258  */
1259 int hid_open_report(struct hid_device *device)
1260 {
1261 	struct hid_parser *parser;
1262 	struct hid_item item;
1263 	unsigned int size;
1264 	const __u8 *start;
1265 	const __u8 *end;
1266 	const __u8 *next;
1267 	int ret;
1268 	int i;
1269 	static int (*dispatch_type[])(struct hid_parser *parser,
1270 				      struct hid_item *item) = {
1271 		hid_parser_main,
1272 		hid_parser_global,
1273 		hid_parser_local,
1274 		hid_parser_reserved
1275 	};
1276 
1277 	if (WARN_ON(device->status & HID_STAT_PARSED))
1278 		return -EBUSY;
1279 
1280 	start = device->bpf_rdesc;
1281 	if (WARN_ON(!start))
1282 		return -ENODEV;
1283 	size = device->bpf_rsize;
1284 
1285 	if (device->driver->report_fixup) {
1286 		/*
1287 		 * device->driver->report_fixup() needs to work
1288 		 * on a copy of our report descriptor so it can
1289 		 * change it.
1290 		 */
1291 		__u8 *buf = kmemdup(start, size, GFP_KERNEL);
1292 
1293 		if (buf == NULL)
1294 			return -ENOMEM;
1295 
1296 		start = device->driver->report_fixup(device, buf, &size);
1297 
1298 		/*
1299 		 * The second kmemdup is required in case report_fixup() returns
1300 		 * static read-only memory, since we have no idea whether that
1301 		 * memory needs to be cleaned up or not at the end.
1302 		 */
1303 		start = kmemdup(start, size, GFP_KERNEL);
1304 		kfree(buf);
1305 		if (start == NULL)
1306 			return -ENOMEM;
1307 	}
1308 
1309 	device->rdesc = start;
1310 	device->rsize = size;
1311 
1312 	parser = vzalloc(sizeof(struct hid_parser));
1313 	if (!parser) {
1314 		ret = -ENOMEM;
1315 		goto alloc_err;
1316 	}
1317 
1318 	parser->device = device;
1319 
1320 	end = start + size;
1321 
1322 	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1323 				     sizeof(struct hid_collection), GFP_KERNEL);
1324 	if (!device->collection) {
1325 		ret = -ENOMEM;
1326 		goto err;
1327 	}
1328 	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1329 	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1330 		device->collection[i].parent_idx = -1;
1331 
1332 	ret = -EINVAL;
1333 	while ((next = fetch_item(start, end, &item)) != NULL) {
1334 		start = next;
1335 
1336 		if (item.format != HID_ITEM_FORMAT_SHORT) {
1337 			hid_err(device, "unexpected long global item\n");
1338 			goto err;
1339 		}
1340 
1341 		if (dispatch_type[item.type](parser, &item)) {
1342 			hid_err(device, "item %u %u %u %u parsing failed\n",
1343 				item.format, (unsigned)item.size,
1344 				(unsigned)item.type, (unsigned)item.tag);
1345 			goto err;
1346 		}
1347 
1348 		if (start == end) {
1349 			if (parser->collection_stack_ptr) {
1350 				hid_err(device, "unbalanced collection at end of report description\n");
1351 				goto err;
1352 			}
1353 			if (parser->local.delimiter_depth) {
1354 				hid_err(device, "unbalanced delimiter at end of report description\n");
1355 				goto err;
1356 			}
1357 
1358 			/*
1359 			 * fetch initial values in case the device's
1360 			 * default multiplier isn't the recommended 1
1361 			 */
1362 			hid_setup_resolution_multiplier(device);
1363 
1364 			kfree(parser->collection_stack);
1365 			vfree(parser);
1366 			device->status |= HID_STAT_PARSED;
1367 
1368 			return 0;
1369 		}
1370 	}
1371 
1372 	hid_err(device, "item fetching failed at offset %u/%u\n",
1373 		size - (unsigned int)(end - start), size);
1374 err:
1375 	kfree(parser->collection_stack);
1376 alloc_err:
1377 	vfree(parser);
1378 	hid_close_report(device);
1379 	return ret;
1380 }
1381 EXPORT_SYMBOL_GPL(hid_open_report);
1382 
1383 /*
1384  * Extract/implement a data field from/to a little endian report (bit array).
1385  *
1386  * Code sort-of follows HID spec:
1387  *     http://www.usb.org/developers/hidpage/HID1_11.pdf
1388  *
1389  * While the USB HID spec allows unlimited length bit fields in "report
1390  * descriptors", most devices never use more than 16 bits.
1391  * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1392  * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1393  */
1394 
1395 static u32 __extract(u8 *report, unsigned offset, int n)
1396 {
1397 	unsigned int idx = offset / 8;
1398 	unsigned int bit_nr = 0;
1399 	unsigned int bit_shift = offset % 8;
1400 	int bits_to_copy = 8 - bit_shift;
1401 	u32 value = 0;
1402 	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1403 
1404 	while (n > 0) {
1405 		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1406 		n -= bits_to_copy;
1407 		bit_nr += bits_to_copy;
1408 		bits_to_copy = 8;
1409 		bit_shift = 0;
1410 		idx++;
1411 	}
1412 
1413 	return value & mask;
1414 }
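/*
 * Worked example for __extract(): a 4-bit field at bit offset 6 in the
 * report bytes { 0x80, 0x01 } takes the two top bits of byte 0 and the two
 * low bits of byte 1 (little-endian bit order), giving 0b0110 == 6.
 */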
1415 
1416 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1417 			unsigned offset, unsigned n)
1418 {
1419 	if (n > 32) {
1420 		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1421 			      __func__, n, current->comm);
1422 		n = 32;
1423 	}
1424 
1425 	return __extract(report, offset, n);
1426 }
1427 EXPORT_SYMBOL_GPL(hid_field_extract);
1428 
1429 /*
1430  * "implement" : set bits in a little endian bit stream.
1431  * Same concepts as "extract" (see comments above).
1432  * The data mangled in the bit stream remains in little endian
1433  * order the whole time. It makes more sense to talk about
1434  * endianness of register values by considering a register
1435  * a "cached" copy of the little endian bit stream.
1436  */
1437 
1438 static void __implement(u8 *report, unsigned offset, int n, u32 value)
1439 {
1440 	unsigned int idx = offset / 8;
1441 	unsigned int bit_shift = offset % 8;
1442 	int bits_to_set = 8 - bit_shift;
1443 
1444 	while (n - bits_to_set >= 0) {
1445 		report[idx] &= ~(0xff << bit_shift);
1446 		report[idx] |= value << bit_shift;
1447 		value >>= bits_to_set;
1448 		n -= bits_to_set;
1449 		bits_to_set = 8;
1450 		bit_shift = 0;
1451 		idx++;
1452 	}
1453 
1454 	/* remaining bits (less than one byte) */
1455 	if (n) {
1456 		u8 bit_mask = ((1U << n) - 1);
1457 		report[idx] &= ~(bit_mask << bit_shift);
1458 		report[idx] |= value << bit_shift;
1459 	}
1460 }
1461 
1462 static void implement(const struct hid_device *hid, u8 *report,
1463 		      unsigned offset, unsigned n, u32 value)
1464 {
1465 	if (unlikely(n > 32)) {
1466 		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1467 			 __func__, n, current->comm);
1468 		n = 32;
1469 	} else if (n < 32) {
1470 		u32 m = (1U << n) - 1;
1471 
1472 		if (unlikely(value > m)) {
1473 			hid_warn(hid,
1474 				 "%s() called with too large value %d (n: %d)! (%s)\n",
1475 				 __func__, value, n, current->comm);
1476 			value &= m;
1477 		}
1478 	}
1479 
1480 	__implement(report, offset, n, value);
1481 }
1482 
1483 /*
1484  * Search an array for a value.
1485  */
1486 
1487 static int search(__s32 *array, __s32 value, unsigned n)
1488 {
1489 	while (n--) {
1490 		if (*array++ == value)
1491 			return 0;
1492 	}
1493 	return -1;
1494 }
1495 
1496 /**
1497  * hid_match_report - check if driver's raw_event should be called
1498  *
1499  * @hid: hid device
1500  * @report: hid report to match against
1501  *
1502  * compare hid->driver->report_table->report_type to report->type
1503  */
1504 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1505 {
1506 	const struct hid_report_id *id = hid->driver->report_table;
1507 
1508 	if (!id) /* NULL means all */
1509 		return 1;
1510 
1511 	for (; id->report_type != HID_TERMINATOR; id++)
1512 		if (id->report_type == HID_ANY_ID ||
1513 				id->report_type == report->type)
1514 			return 1;
1515 	return 0;
1516 }
1517 
1518 /**
1519  * hid_match_usage - check if driver's event should be called
1520  *
1521  * @hid: hid device
1522  * @usage: usage to match against
1523  *
1524  * compare hid->driver->usage_table->usage_{type,code} to
1525  * usage->usage_{type,code}
1526  */
1527 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1528 {
1529 	const struct hid_usage_id *id = hid->driver->usage_table;
1530 
1531 	if (!id) /* NULL means all */
1532 		return 1;
1533 
1534 	for (; id->usage_type != HID_ANY_ID - 1; id++)
1535 		if ((id->usage_hid == HID_ANY_ID ||
1536 				id->usage_hid == usage->hid) &&
1537 				(id->usage_type == HID_ANY_ID ||
1538 				id->usage_type == usage->type) &&
1539 				(id->usage_code == HID_ANY_ID ||
1540 				 id->usage_code == usage->code))
1541 			return 1;
1542 	return 0;
1543 }
1544 
1545 static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1546 		struct hid_usage *usage, __s32 value, int interrupt)
1547 {
1548 	struct hid_driver *hdrv = hid->driver;
1549 	int ret;
1550 
1551 	if (!list_empty(&hid->debug_list))
1552 		hid_dump_input(hid, usage, value);
1553 
1554 	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1555 		ret = hdrv->event(hid, field, usage, value);
1556 		if (ret != 0) {
1557 			if (ret < 0)
1558 				hid_err(hid, "%s's event failed with %d\n",
1559 						hdrv->name, ret);
1560 			return;
1561 		}
1562 	}
1563 
1564 	if (hid->claimed & HID_CLAIMED_INPUT)
1565 		hidinput_hid_event(hid, field, usage, value);
1566 	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1567 		hid->hiddev_hid_event(hid, field, usage, value);
1568 }
1569 
1570 /*
1571  * Checks if the given value is valid within this field
1572  */
1573 static inline int hid_array_value_is_valid(struct hid_field *field,
1574 					   __s32 value)
1575 {
1576 	__s32 min = field->logical_minimum;
1577 
1578 	/*
1579 	 * Value needs to be between logical min and max, and
1580 	 * (value - min) is used as an index in the usage array.
1581 	 * This array is of size field->maxusage
1582 	 */
1583 	return value >= min &&
1584 	       value <= field->logical_maximum &&
1585 	       value - min < field->maxusage;
1586 }
1587 
1588 /*
1589  * Fetch the field from the data. The field content is stored for next
1590  * report processing (we do differential reporting to the layer).
1591  */
1592 static void hid_input_fetch_field(struct hid_device *hid,
1593 				  struct hid_field *field,
1594 				  __u8 *data)
1595 {
1596 	unsigned n;
1597 	unsigned count = field->report_count;
1598 	unsigned offset = field->report_offset;
1599 	unsigned size = field->report_size;
1600 	__s32 min = field->logical_minimum;
1601 	__s32 *value;
1602 
1603 	value = field->new_value;
1604 	memset(value, 0, count * sizeof(__s32));
1605 	field->ignored = false;
1606 
1607 	for (n = 0; n < count; n++) {
1608 
1609 		value[n] = min < 0 ?
1610 			snto32(hid_field_extract(hid, data, offset + n * size,
1611 			       size), size) :
1612 			hid_field_extract(hid, data, offset + n * size, size);
1613 
1614 		/* Ignore report if ErrorRollOver */
1615 		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1616 		    hid_array_value_is_valid(field, value[n]) &&
1617 		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1618 			field->ignored = true;
1619 			return;
1620 		}
1621 	}
1622 }
1623 
1624 /*
1625  * Process a received variable field.
1626  */
1627 
1628 static void hid_input_var_field(struct hid_device *hid,
1629 				struct hid_field *field,
1630 				int interrupt)
1631 {
1632 	unsigned int count = field->report_count;
1633 	__s32 *value = field->new_value;
1634 	unsigned int n;
1635 
1636 	for (n = 0; n < count; n++)
1637 		hid_process_event(hid,
1638 				  field,
1639 				  &field->usage[n],
1640 				  value[n],
1641 				  interrupt);
1642 
1643 	memcpy(field->value, value, count * sizeof(__s32));
1644 }
1645 
1646 /*
1647  * Process a received array field. The field content is stored for
1648  * next report processing (we do differential reporting to the layer).
1649  */
1650 
1651 static void hid_input_array_field(struct hid_device *hid,
1652 				  struct hid_field *field,
1653 				  int interrupt)
1654 {
1655 	unsigned int n;
1656 	unsigned int count = field->report_count;
1657 	__s32 min = field->logical_minimum;
1658 	__s32 *value;
1659 
1660 	value = field->new_value;
1661 
1662 	/* ErrorRollOver */
1663 	if (field->ignored)
1664 		return;
1665 
1666 	for (n = 0; n < count; n++) {
1667 		if (hid_array_value_is_valid(field, field->value[n]) &&
1668 		    search(value, field->value[n], count))
1669 			hid_process_event(hid,
1670 					  field,
1671 					  &field->usage[field->value[n] - min],
1672 					  0,
1673 					  interrupt);
1674 
1675 		if (hid_array_value_is_valid(field, value[n]) &&
1676 		    search(field->value, value[n], count))
1677 			hid_process_event(hid,
1678 					  field,
1679 					  &field->usage[value[n] - min],
1680 					  1,
1681 					  interrupt);
1682 	}
1683 
1684 	memcpy(field->value, value, count * sizeof(__s32));
1685 }
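/*
 * Keyboard example for the array handling above: if the previous report held
 * usage 0x04 ('a') and the new one holds 0x05 ('b'), the loop emits a
 * release (value 0) for 'a' and a press (value 1) for 'b'; usages present in
 * both reports generate no event.
 */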
1686 
1687 /*
1688  * Analyse a received report, and fetch the data from it. The field
1689  * content is stored for next report processing (we do differential
1690  * reporting to the layer).
1691  */
1692 static void hid_process_report(struct hid_device *hid,
1693 			       struct hid_report *report,
1694 			       __u8 *data,
1695 			       int interrupt)
1696 {
1697 	unsigned int a;
1698 	struct hid_field_entry *entry;
1699 	struct hid_field *field;
1700 
1701 	/* first retrieve all incoming values in data */
1702 	for (a = 0; a < report->maxfield; a++)
1703 		hid_input_fetch_field(hid, report->field[a], data);
1704 
1705 	if (!list_empty(&report->field_entry_list)) {
1706 		/* INPUT_REPORT, we have a priority list of fields */
1707 		list_for_each_entry(entry,
1708 				    &report->field_entry_list,
1709 				    list) {
1710 			field = entry->field;
1711 
1712 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1713 				hid_process_event(hid,
1714 						  field,
1715 						  &field->usage[entry->index],
1716 						  field->new_value[entry->index],
1717 						  interrupt);
1718 			else
1719 				hid_input_array_field(hid, field, interrupt);
1720 		}
1721 
1722 		/* we need to do the memcpy at the end for var items */
1723 		for (a = 0; a < report->maxfield; a++) {
1724 			field = report->field[a];
1725 
1726 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1727 				memcpy(field->value, field->new_value,
1728 				       field->report_count * sizeof(__s32));
1729 		}
1730 	} else {
1731 		/* FEATURE_REPORT, regular processing */
1732 		for (a = 0; a < report->maxfield; a++) {
1733 			field = report->field[a];
1734 
1735 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1736 				hid_input_var_field(hid, field, interrupt);
1737 			else
1738 				hid_input_array_field(hid, field, interrupt);
1739 		}
1740 	}
1741 }
1742 
1743 /*
1744  * Insert a given usage_index in a field in the list
1745  * of processed usages in the report.
1746  *
1747  * The list is kept sorted by descending priority score, so the
1748  * elements with the highest priority score are processed first.
1749  */
1750 static void __hid_insert_field_entry(struct hid_device *hid,
1751 				     struct hid_report *report,
1752 				     struct hid_field_entry *entry,
1753 				     struct hid_field *field,
1754 				     unsigned int usage_index)
1755 {
1756 	struct hid_field_entry *next;
1757 
1758 	entry->field = field;
1759 	entry->index = usage_index;
1760 	entry->priority = field->usages_priorities[usage_index];
1761 
1762 	/* insert the element at the correct position */
1763 	list_for_each_entry(next,
1764 			    &report->field_entry_list,
1765 			    list) {
1766 		/*
1767 		 * the priority of our element is strictly higher
1768 		 * than the next one, insert it before
1769 		 */
1770 		if (entry->priority > next->priority) {
1771 			list_add_tail(&entry->list, &next->list);
1772 			return;
1773 		}
1774 	}
1775 
1776 	/* lowest priority score: insert at the end */
1777 	list_add_tail(&entry->list, &report->field_entry_list);
1778 }
1779 
1780 static void hid_report_process_ordering(struct hid_device *hid,
1781 					struct hid_report *report)
1782 {
1783 	struct hid_field *field;
1784 	struct hid_field_entry *entries;
1785 	unsigned int a, u, usages;
1786 	unsigned int count = 0;
1787 
1788 	/* count the number of individual fields in the report */
1789 	for (a = 0; a < report->maxfield; a++) {
1790 		field = report->field[a];
1791 
1792 		if (field->flags & HID_MAIN_ITEM_VARIABLE)
1793 			count += field->report_count;
1794 		else
1795 			count++;
1796 	}
1797 
1798 	/* allocate the memory to process the fields */
1799 	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1800 	if (!entries)
1801 		return;
1802 
1803 	report->field_entries = entries;
1804 
1805 	/*
1806 	 * walk through all fields in the report and
1807 	 * store them by priority order in report->field_entry_list
1808 	 *
1809 	 * - Var elements are individualized (field + usage_index)
1810 	 * - Arrays are taken as one, we cannot choose an order for them
1811 	 */
1812 	usages = 0;
1813 	for (a = 0; a < report->maxfield; a++) {
1814 		field = report->field[a];
1815 
1816 		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1817 			for (u = 0; u < field->report_count; u++) {
1818 				__hid_insert_field_entry(hid, report,
1819 							 &entries[usages],
1820 							 field, u);
1821 				usages++;
1822 			}
1823 		} else {
1824 			__hid_insert_field_entry(hid, report, &entries[usages],
1825 						 field, 0);
1826 			usages++;
1827 		}
1828 	}
1829 }
1830 
1831 static void hid_process_ordering(struct hid_device *hid)
1832 {
1833 	struct hid_report *report;
1834 	struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1835 
1836 	list_for_each_entry(report, &report_enum->report_list, list)
1837 		hid_report_process_ordering(hid, report);
1838 }
1839 
1840 /*
1841  * Output the field into the report.
1842  */
1843 
1844 static void hid_output_field(const struct hid_device *hid,
1845 			     struct hid_field *field, __u8 *data)
1846 {
1847 	unsigned count = field->report_count;
1848 	unsigned offset = field->report_offset;
1849 	unsigned size = field->report_size;
1850 	unsigned n;
1851 
1852 	for (n = 0; n < count; n++) {
1853 		if (field->logical_minimum < 0)	/* signed values */
1854 			implement(hid, data, offset + n * size, size,
1855 				  s32ton(field->value[n], size));
1856 		else				/* unsigned values */
1857 			implement(hid, data, offset + n * size, size,
1858 				  field->value[n]);
1859 	}
1860 }
1861 
1862 /*
1863  * Compute the size of a report.
1864  */
1865 static size_t hid_compute_report_size(struct hid_report *report)
1866 {
1867 	if (report->size)
1868 		return ((report->size - 1) >> 3) + 1;
1869 
1870 	return 0;
1871 }
1872 
1873 /*
1874  * Create a report. 'data' has to be allocated using
1875  * hid_alloc_report_buf() so that it has the proper size.
1876  */
1877 
1878 void hid_output_report(struct hid_report *report, __u8 *data)
1879 {
1880 	unsigned n;
1881 
1882 	if (report->id > 0)
1883 		*data++ = report->id;
1884 
1885 	memset(data, 0, hid_compute_report_size(report));
1886 	for (n = 0; n < report->maxfield; n++)
1887 		hid_output_field(report->device, report->field[n], data);
1888 }
1889 EXPORT_SYMBOL_GPL(hid_output_report);
1890 
1891 /*
1892  * Allocator for buffer that is going to be passed to hid_output_report()
1893  */
1894 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1895 {
1896 	/*
1897 	 * 7 extra bytes are necessary so that implement(), which works on
1898 	 * 8 byte chunks, never writes past the end of the buffer.
1899 	 * 1 extra byte is needed for the report ID if it is null (not used),
1900 	 * so we can reserve that extra byte in the first position of the
1901 	 * buffer when sending it to .raw_request()
1902 	 */
1903 
1904 	u32 len = hid_report_len(report) + 7 + (report->id == 0);
1905 
1906 	return kzalloc(len, flags);
1907 }
1908 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
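
/*
 * Hypothetical usage sketch (not part of this file): serializing a report
 * into a freshly allocated buffer before handing it to a transport
 * specific send routine ("my_send" is illustrative).
 *
 *	u8 *buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	hid_output_report(report, buf);
 *	ret = my_send(hdev, buf, hid_report_len(report));
 *	kfree(buf);
 */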
1909 
1910 /*
1911  * Set a field value. For the value to actually reach the device, the
1912  * report this field belongs to has to be built and transferred to the
1913  * device (see the usage sketch after hid_find_field() below).
1914  */
1915 
1916 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1917 {
1918 	unsigned size;
1919 
1920 	if (!field)
1921 		return -1;
1922 
1923 	size = field->report_size;
1924 
1925 	hid_dump_input(field->report->device, field->usage + offset, value);
1926 
1927 	if (offset >= field->report_count) {
1928 		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1929 				offset, field->report_count);
1930 		return -1;
1931 	}
1932 	if (field->logical_minimum < 0) {
1933 		if (value != snto32(s32ton(value, size), size)) {
1934 			hid_err(field->report->device, "value %d is out of range\n", value);
1935 			return -1;
1936 		}
1937 	}
1938 	field->value[offset] = value;
1939 	return 0;
1940 }
1941 EXPORT_SYMBOL_GPL(hid_set_field);
1942 
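/**
 * hid_find_field - locate a field in a device's reports by usage
 * @hdev: hid device
 * @report_type: HID report type (HID_*_REPORT)
 * @application: application usage of the report (e.g. a HID_GD_* value)
 * @usage: HID usage to look for
 *
 * Walk all reports of the given type and return the first field carrying
 * @usage inside a report whose application matches @application, or NULL
 * if no such field exists.
 */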
1943 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1944 				 unsigned int application, unsigned int usage)
1945 {
1946 	struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1947 	struct hid_report *report;
1948 	int i, j;
1949 
1950 	list_for_each_entry(report, report_list, list) {
1951 		if (report->application != application)
1952 			continue;
1953 
1954 		for (i = 0; i < report->maxfield; i++) {
1955 			struct hid_field *field = report->field[i];
1956 
1957 			for (j = 0; j < field->maxusage; j++) {
1958 				if (field->usage[j].hid == usage)
1959 					return field;
1960 			}
1961 		}
1962 	}
1963 
1964 	return NULL;
1965 }
1966 EXPORT_SYMBOL_GPL(hid_find_field);
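
/*
 * Hypothetical usage sketch (not part of this file): changing a single
 * output value by combining hid_find_field(), hid_set_field() and
 * hid_hw_request(). The application/usage constants and "value" are
 * illustrative.
 *
 *	struct hid_field *field;
 *
 *	field = hid_find_field(hdev, HID_OUTPUT_REPORT, HID_GD_GAMEPAD,
 *			       0x000100c5);
 *	if (!field)
 *		return -ENODEV;
 *	if (hid_set_field(field, 0, value))
 *		return -EINVAL;
 *	hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT);
 */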
1967 
1968 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1969 		const u8 *data)
1970 {
1971 	struct hid_report *report;
1972 	unsigned int n = 0;	/* Normally report number is 0 */
1973 
1974 	/* Device uses numbered reports, data[0] is report number */
1975 	if (report_enum->numbered)
1976 		n = *data;
1977 
1978 	report = report_enum->report_id_hash[n];
1979 	if (report == NULL)
1980 		dbg_hid("undefined report_id %u received\n", n);
1981 
1982 	return report;
1983 }
1984 
1985 /*
1986  * Implement a generic .request() callback, using .raw_request().
1987  * DO NOT USE in hid drivers directly; use hid_hw_request() instead.
1988  */
1989 int __hid_request(struct hid_device *hid, struct hid_report *report,
1990 		enum hid_class_request reqtype)
1991 {
1992 	char *buf, *data_buf;
1993 	int ret;
1994 	u32 len;
1995 
1996 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
1997 	if (!buf)
1998 		return -ENOMEM;
1999 
2000 	data_buf = buf;
2001 	len = hid_report_len(report);
2002 
2003 	if (report->id == 0) {
2004 		/* reserve the first byte for the report ID */
2005 		data_buf++;
2006 		len++;
2007 	}
2008 
2009 	if (reqtype == HID_REQ_SET_REPORT)
2010 		hid_output_report(report, data_buf);
2011 
2012 	ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
2013 	if (ret < 0) {
2014 		dbg_hid("unable to complete request: %d\n", ret);
2015 		goto out;
2016 	}
2017 
2018 	if (reqtype == HID_REQ_GET_REPORT)
2019 		hid_input_report(hid, report->type, buf, ret, 0);
2020 
2021 	ret = 0;
2022 
2023 out:
2024 	kfree(buf);
2025 	return ret;
2026 }
2027 EXPORT_SYMBOL_GPL(__hid_request);
2028 
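/**
 * hid_report_raw_event - process a raw report for an already parsed device
 * @hid: hid device
 * @type: HID report type (HID_*_REPORT)
 * @data: report contents (including the report ID byte for numbered reports)
 * @size: size of data parameter
 * @interrupt: distinguish between interrupt and control transfers
 *
 * Looks up the report, pads it with zeroes if it arrived shorter than
 * expected, and forwards it to hiddev, hidraw, the bound driver and
 * hidinput as appropriate.
 *
 * Return: 0 on success, or a negative error code propagated from hidraw.
 */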
2029 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2030 			 int interrupt)
2031 {
2032 	struct hid_report_enum *report_enum = hid->report_enum + type;
2033 	struct hid_report *report;
2034 	struct hid_driver *hdrv;
2035 	int max_buffer_size = HID_MAX_BUFFER_SIZE;
2036 	u32 rsize, csize = size;
2037 	u8 *cdata = data;
2038 	int ret = 0;
2039 
2040 	report = hid_get_report(report_enum, data);
2041 	if (!report)
2042 		goto out;
2043 
2044 	if (report_enum->numbered) {
2045 		cdata++;
2046 		csize--;
2047 	}
2048 
2049 	rsize = hid_compute_report_size(report);
2050 
2051 	if (hid->ll_driver->max_buffer_size)
2052 		max_buffer_size = hid->ll_driver->max_buffer_size;
2053 
2054 	if (report_enum->numbered && rsize >= max_buffer_size)
2055 		rsize = max_buffer_size - 1;
2056 	else if (rsize > max_buffer_size)
2057 		rsize = max_buffer_size;
2058 
2059 	if (csize < rsize) {
2060 		dbg_hid("report %d is too short (%d < %d)\n", report->id,
2061 				csize, rsize);
2062 		memset(cdata + csize, 0, rsize - csize);
2063 	}
2064 
2065 	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2066 		hid->hiddev_report_event(hid, report);
2067 	if (hid->claimed & HID_CLAIMED_HIDRAW) {
2068 		ret = hidraw_report_event(hid, data, size);
2069 		if (ret)
2070 			goto out;
2071 	}
2072 
2073 	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2074 		hid_process_report(hid, report, cdata, interrupt);
2075 		hdrv = hid->driver;
2076 		if (hdrv && hdrv->report)
2077 			hdrv->report(hid, report);
2078 	}
2079 
2080 	if (hid->claimed & HID_CLAIMED_INPUT)
2081 		hidinput_report_event(hid, report);
2082 out:
2083 	return ret;
2084 }
2085 EXPORT_SYMBOL_GPL(hid_report_raw_event);
2086 
2087 
2088 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2089 			      u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2090 			      bool lock_already_taken)
2091 {
2092 	struct hid_report_enum *report_enum;
2093 	struct hid_driver *hdrv;
2094 	struct hid_report *report;
2095 	int ret = 0;
2096 
2097 	if (!hid)
2098 		return -ENODEV;
2099 
2100 	ret = down_trylock(&hid->driver_input_lock);
2101 	if (lock_already_taken && !ret) {
2102 		up(&hid->driver_input_lock);
2103 		return -EINVAL;
2104 	} else if (!lock_already_taken && ret) {
2105 		return -EBUSY;
2106 	}
2107 
2108 	if (!hid->driver) {
2109 		ret = -ENODEV;
2110 		goto unlock;
2111 	}
2112 	report_enum = hid->report_enum + type;
2113 	hdrv = hid->driver;
2114 
2115 	data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2116 	if (IS_ERR(data)) {
2117 		ret = PTR_ERR(data);
2118 		goto unlock;
2119 	}
2120 
2121 	if (!size) {
2122 		dbg_hid("empty report\n");
2123 		ret = -1;
2124 		goto unlock;
2125 	}
2126 
2127 	/* Avoid unnecessary overhead if debugfs is disabled */
2128 	if (!list_empty(&hid->debug_list))
2129 		hid_dump_report(hid, type, data, size);
2130 
2131 	report = hid_get_report(report_enum, data);
2132 
2133 	if (!report) {
2134 		ret = -1;
2135 		goto unlock;
2136 	}
2137 
2138 	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2139 		ret = hdrv->raw_event(hid, report, data, size);
2140 		if (ret < 0)
2141 			goto unlock;
2142 	}
2143 
2144 	ret = hid_report_raw_event(hid, type, data, size, interrupt);
2145 
2146 unlock:
2147 	if (!lock_already_taken)
2148 		up(&hid->driver_input_lock);
2149 	return ret;
2150 }
2151 
2152 /**
2153  * hid_input_report - report data from lower layer (usb, bt...)
2154  *
2155  * @hid: hid device
2156  * @type: HID report type (HID_*_REPORT)
2157  * @data: report contents
2158  * @size: size of data parameter
2159  * @interrupt: distinguish between interrupt and control transfers
2160  *
2161  * This is the data entry point for lower layers.
2162  */
2163 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2164 		     int interrupt)
2165 {
2166 	return __hid_input_report(hid, type, data, size, interrupt, 0,
2167 				  false, /* from_bpf */
2168 				  false /* lock_already_taken */);
2169 }
2170 EXPORT_SYMBOL_GPL(hid_input_report);
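
/*
 * Hypothetical transport-driver sketch (not part of this file): a low
 * level driver forwards raw interrupt data to the core via
 * hid_input_report(); "struct my_transport" is illustrative.
 *
 *	static void my_irq_complete(struct my_transport *xport, u8 *data,
 *				    u32 len)
 *	{
 *		hid_input_report(xport->hid, HID_INPUT_REPORT, data, len, 1);
 *	}
 */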
2171 
2172 bool hid_match_one_id(const struct hid_device *hdev,
2173 		      const struct hid_device_id *id)
2174 {
2175 	return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2176 		(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2177 		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2178 		(id->product == HID_ANY_ID || id->product == hdev->product);
2179 }
2180 
2181 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2182 		const struct hid_device_id *id)
2183 {
2184 	for (; id->bus; id++)
2185 		if (hid_match_one_id(hdev, id))
2186 			return id;
2187 
2188 	return NULL;
2189 }
2190 EXPORT_SYMBOL_GPL(hid_match_id);
2191 
2192 static const struct hid_device_id hid_hiddev_list[] = {
2193 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2194 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2195 	{ }
2196 };
2197 
2198 static bool hid_hiddev(struct hid_device *hdev)
2199 {
2200 	return !!hid_match_id(hdev, hid_hiddev_list);
2201 }
2202 
2203 
2204 static ssize_t
2205 report_descriptor_read(struct file *filp, struct kobject *kobj,
2206 		       const struct bin_attribute *attr,
2207 		       char *buf, loff_t off, size_t count)
2208 {
2209 	struct device *dev = kobj_to_dev(kobj);
2210 	struct hid_device *hdev = to_hid_device(dev);
2211 
2212 	if (off >= hdev->rsize)
2213 		return 0;
2214 
2215 	if (off + count > hdev->rsize)
2216 		count = hdev->rsize - off;
2217 
2218 	memcpy(buf, hdev->rdesc + off, count);
2219 
2220 	return count;
2221 }
2222 
2223 static ssize_t
2224 country_show(struct device *dev, struct device_attribute *attr,
2225 	     char *buf)
2226 {
2227 	struct hid_device *hdev = to_hid_device(dev);
2228 
2229 	return sprintf(buf, "%02x\n", hdev->country & 0xff);
2230 }
2231 
2232 static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);
2233 
2234 static const DEVICE_ATTR_RO(country);
2235 
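/**
 * hid_connect - connect the device to its listeners
 * @hdev: hid device
 * @connect_mask: which outputs to connect, see HID_CONNECT_*
 *
 * Connect the device to hidinput, hiddev and/or hidraw according to
 * @connect_mask and the device quirks, and log a summary line for the
 * device. Normally called through hid_hw_start().
 */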
2236 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2237 {
2238 	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2239 		"Joystick", "Gamepad", "Keyboard", "Keypad",
2240 		"Multi-Axis Controller"
2241 	};
2242 	const char *type, *bus;
2243 	char buf[64] = "";
2244 	unsigned int i;
2245 	int len;
2246 	int ret;
2247 
2248 	ret = hid_bpf_connect_device(hdev);
2249 	if (ret)
2250 		return ret;
2251 
2252 	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2253 		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2254 	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2255 		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2256 	if (hdev->bus != BUS_USB)
2257 		connect_mask &= ~HID_CONNECT_HIDDEV;
2258 	if (hid_hiddev(hdev))
2259 		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2260 
2261 	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2262 				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2263 		hdev->claimed |= HID_CLAIMED_INPUT;
2264 
2265 	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2266 			!hdev->hiddev_connect(hdev,
2267 				connect_mask & HID_CONNECT_HIDDEV_FORCE))
2268 		hdev->claimed |= HID_CLAIMED_HIDDEV;
2269 	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2270 		hdev->claimed |= HID_CLAIMED_HIDRAW;
2271 
2272 	if (connect_mask & HID_CONNECT_DRIVER)
2273 		hdev->claimed |= HID_CLAIMED_DRIVER;
2274 
2275 	/* Drivers with the ->raw_event callback set are not required to connect
2276 	 * to any other listener. */
2277 	if (!hdev->claimed && !hdev->driver->raw_event) {
2278 		hid_err(hdev, "device has no listeners, quitting\n");
2279 		return -ENODEV;
2280 	}
2281 
2282 	hid_process_ordering(hdev);
2283 
2284 	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2285 			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2286 		hdev->ff_init(hdev);
2287 
2288 	len = 0;
2289 	if (hdev->claimed & HID_CLAIMED_INPUT)
2290 		len += sprintf(buf + len, "input");
2291 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2292 		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2293 				((struct hiddev *)hdev->hiddev)->minor);
2294 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2295 		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2296 				((struct hidraw *)hdev->hidraw)->minor);
2297 
2298 	type = "Device";
2299 	for (i = 0; i < hdev->maxcollection; i++) {
2300 		struct hid_collection *col = &hdev->collection[i];
2301 		if (col->type == HID_COLLECTION_APPLICATION &&
2302 		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2303 		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2304 			type = types[col->usage & 0xffff];
2305 			break;
2306 		}
2307 	}
2308 
2309 	switch (hdev->bus) {
2310 	case BUS_USB:
2311 		bus = "USB";
2312 		break;
2313 	case BUS_BLUETOOTH:
2314 		bus = "BLUETOOTH";
2315 		break;
2316 	case BUS_I2C:
2317 		bus = "I2C";
2318 		break;
2319 	case BUS_SDW:
2320 		bus = "SOUNDWIRE";
2321 		break;
2322 	case BUS_VIRTUAL:
2323 		bus = "VIRTUAL";
2324 		break;
2325 	case BUS_INTEL_ISHTP:
2326 	case BUS_AMD_SFH:
2327 		bus = "SENSOR HUB";
2328 		break;
2329 	default:
2330 		bus = "<UNKNOWN>";
2331 	}
2332 
2333 	ret = device_create_file(&hdev->dev, &dev_attr_country);
2334 	if (ret)
2335 		hid_warn(hdev,
2336 			 "can't create sysfs country code attribute err: %d\n", ret);
2337 
2338 	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2339 		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2340 		 type, hdev->name, hdev->phys);
2341 
2342 	return 0;
2343 }
2344 EXPORT_SYMBOL_GPL(hid_connect);
2345 
2346 void hid_disconnect(struct hid_device *hdev)
2347 {
2348 	device_remove_file(&hdev->dev, &dev_attr_country);
2349 	if (hdev->claimed & HID_CLAIMED_INPUT)
2350 		hidinput_disconnect(hdev);
2351 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2352 		hdev->hiddev_disconnect(hdev);
2353 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2354 		hidraw_disconnect(hdev);
2355 	hdev->claimed = 0;
2356 
2357 	hid_bpf_disconnect_device(hdev);
2358 }
2359 EXPORT_SYMBOL_GPL(hid_disconnect);
2360 
2361 /**
2362  * hid_hw_start - start underlying HW
2363  * @hdev: hid device
2364  * @connect_mask: which outputs to connect, see HID_CONNECT_*
2365  *
2366  * Call this in probe function *after* hid_parse. This will set up HW
2367  * buffers and start the device (if not deferred to device open).
2368  * hid_hw_stop must be called if this was successful.
2369  */
2370 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2371 {
2372 	int error;
2373 
2374 	error = hdev->ll_driver->start(hdev);
2375 	if (error)
2376 		return error;
2377 
2378 	if (connect_mask) {
2379 		error = hid_connect(hdev, connect_mask);
2380 		if (error) {
2381 			hdev->ll_driver->stop(hdev);
2382 			return error;
2383 		}
2384 	}
2385 
2386 	return 0;
2387 }
2388 EXPORT_SYMBOL_GPL(hid_hw_start);
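
/*
 * Hypothetical probe() sketch (not part of this file) showing the usual
 * hid_parse()/hid_hw_start() pairing described above; error handling is
 * minimal on purpose.
 *
 *	static int my_probe(struct hid_device *hdev,
 *			    const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 */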
2389 
2390 /**
2391  * hid_hw_stop - stop underlying HW
2392  * @hdev: hid device
2393  *
2394  * This is usually called from the remove function, or from probe when
2395  * something failed and hid_hw_start() was already called.
2396  */
2397 void hid_hw_stop(struct hid_device *hdev)
2398 {
2399 	hid_disconnect(hdev);
2400 	hdev->ll_driver->stop(hdev);
2401 }
2402 EXPORT_SYMBOL_GPL(hid_hw_stop);
2403 
2404 /**
2405  * hid_hw_open - signal underlying HW to start delivering events
2406  * @hdev: hid device
2407  *
2408  * Tell underlying HW to start delivering events from the device.
2409  * This function should be called sometime after a successful call
2410  * to hid_hw_start().
2411  */
2412 int hid_hw_open(struct hid_device *hdev)
2413 {
2414 	int ret;
2415 
2416 	ret = mutex_lock_killable(&hdev->ll_open_lock);
2417 	if (ret)
2418 		return ret;
2419 
2420 	if (!hdev->ll_open_count++) {
2421 		ret = hdev->ll_driver->open(hdev);
2422 		if (ret)
2423 			hdev->ll_open_count--;
2424 
2425 		if (hdev->driver->on_hid_hw_open)
2426 			hdev->driver->on_hid_hw_open(hdev);
2427 	}
2428 
2429 	mutex_unlock(&hdev->ll_open_lock);
2430 	return ret;
2431 }
2432 EXPORT_SYMBOL_GPL(hid_hw_open);
2433 
2434 /**
2435  * hid_hw_close - signal underlying HW to stop delivering events
2436  *
2437  * @hdev: hid device
2438  *
2439  * This function indicates that we are not interested in the events
2440  * from this device anymore. Delivery of events may or may not stop,
2441  * depending on the number of users still outstanding.
2442  */
2443 void hid_hw_close(struct hid_device *hdev)
2444 {
2445 	mutex_lock(&hdev->ll_open_lock);
2446 	if (!--hdev->ll_open_count) {
2447 		hdev->ll_driver->close(hdev);
2448 
2449 		if (hdev->driver->on_hid_hw_close)
2450 			hdev->driver->on_hid_hw_close(hdev);
2451 	}
2452 	mutex_unlock(&hdev->ll_open_lock);
2453 }
2454 EXPORT_SYMBOL_GPL(hid_hw_close);
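
/*
 * Hypothetical sketch (not part of this file): hid_hw_open() and
 * hid_hw_close() are reference counted and are usually paired, e.g. from
 * an input device's open()/close() handlers.
 *
 *	static int my_input_open(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		return hid_hw_open(hdev);
 *	}
 *
 *	static void my_input_close(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		hid_hw_close(hdev);
 *	}
 */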
2455 
2456 /**
2457  * hid_hw_request - send report request to device
2458  *
2459  * @hdev: hid device
2460  * @report: report to send
2461  * @reqtype: hid request type
2462  */
2463 void hid_hw_request(struct hid_device *hdev,
2464 		    struct hid_report *report, enum hid_class_request reqtype)
2465 {
2466 	if (hdev->ll_driver->request)
2467 		return hdev->ll_driver->request(hdev, report, reqtype);
2468 
2469 	__hid_request(hdev, report, reqtype);
2470 }
2471 EXPORT_SYMBOL_GPL(hid_hw_request);
2472 
2473 int __hid_hw_raw_request(struct hid_device *hdev,
2474 			 unsigned char reportnum, __u8 *buf,
2475 			 size_t len, enum hid_report_type rtype,
2476 			 enum hid_class_request reqtype,
2477 			 u64 source, bool from_bpf)
2478 {
2479 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2480 	int ret;
2481 
2482 	if (hdev->ll_driver->max_buffer_size)
2483 		max_buffer_size = hdev->ll_driver->max_buffer_size;
2484 
2485 	if (len < 1 || len > max_buffer_size || !buf)
2486 		return -EINVAL;
2487 
2488 	ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2489 					    reqtype, source, from_bpf);
2490 	if (ret)
2491 		return ret;
2492 
2493 	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2494 					    rtype, reqtype);
2495 }
2496 
2497 /**
2498  * hid_hw_raw_request - send report request to device
2499  *
2500  * @hdev: hid device
2501  * @reportnum: report ID
2502  * @buf: in/out data to transfer
2503  * @len: length of buf
2504  * @rtype: HID report type
2505  * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2506  *
2507  * Return: count of data transferred, negative if error
2508  *
2509  * Same behavior as hid_hw_request, but with raw buffers instead.
2510  */
2511 int hid_hw_raw_request(struct hid_device *hdev,
2512 		       unsigned char reportnum, __u8 *buf,
2513 		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2514 {
2515 	return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2516 }
2517 EXPORT_SYMBOL_GPL(hid_hw_raw_request);
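
/*
 * Hypothetical usage sketch (not part of this file): fetching a feature
 * report into a caller-provided buffer. The report ID and length are
 * illustrative; the buffer should be heap allocated, as some transports
 * use it for DMA. "my_parse_feature" is a placeholder for whatever
 * consumes the data.
 *
 *	u8 *buf = kzalloc(16, GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_raw_request(hdev, 0x01, buf, 16,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	if (ret > 0)
 *		my_parse_feature(buf, ret);
 *	kfree(buf);
 */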
2518 
2519 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2520 			   bool from_bpf)
2521 {
2522 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2523 	int ret;
2524 
2525 	if (hdev->ll_driver->max_buffer_size)
2526 		max_buffer_size = hdev->ll_driver->max_buffer_size;
2527 
2528 	if (len < 1 || len > max_buffer_size || !buf)
2529 		return -EINVAL;
2530 
2531 	ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2532 	if (ret)
2533 		return ret;
2534 
2535 	if (hdev->ll_driver->output_report)
2536 		return hdev->ll_driver->output_report(hdev, buf, len);
2537 
2538 	return -ENOSYS;
2539 }
2540 
2541 /**
2542  * hid_hw_output_report - send output report to device
2543  *
2544  * @hdev: hid device
2545  * @buf: raw data to transfer
2546  * @len: length of buf
2547  *
2548  * Return: count of data transferred, negative if error
2549  */
2550 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2551 {
2552 	return __hid_hw_output_report(hdev, buf, len, 0, false);
2553 }
2554 EXPORT_SYMBOL_GPL(hid_hw_output_report);
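
/*
 * Hypothetical usage sketch (not part of this file): sending a fixed
 * command over the interrupt/output channel. The first byte is the report
 * ID (0 if the device does not use report IDs); the payload is
 * illustrative.
 *
 *	static const u8 cmd[] = { 0x02, 0xff, 0x00 };
 *	u8 *buf = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_output_report(hdev, buf, sizeof(cmd));
 *	kfree(buf);
 */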
2555 
2556 #ifdef CONFIG_PM
2557 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2558 {
2559 	if (hdev->driver && hdev->driver->suspend)
2560 		return hdev->driver->suspend(hdev, state);
2561 
2562 	return 0;
2563 }
2564 EXPORT_SYMBOL_GPL(hid_driver_suspend);
2565 
2566 int hid_driver_reset_resume(struct hid_device *hdev)
2567 {
2568 	if (hdev->driver && hdev->driver->reset_resume)
2569 		return hdev->driver->reset_resume(hdev);
2570 
2571 	return 0;
2572 }
2573 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2574 
2575 int hid_driver_resume(struct hid_device *hdev)
2576 {
2577 	if (hdev->driver && hdev->driver->resume)
2578 		return hdev->driver->resume(hdev);
2579 
2580 	return 0;
2581 }
2582 EXPORT_SYMBOL_GPL(hid_driver_resume);
2583 #endif /* CONFIG_PM */
2584 
2585 struct hid_dynid {
2586 	struct list_head list;
2587 	struct hid_device_id id;
2588 };
2589 
2590 /**
2591  * new_id_store - add a new HID device ID to this driver and re-probe devices
2592  * @drv: target device driver
2593  * @buf: buffer for scanning device ID data
2594  * @count: input size
2595  *
2596  * Adds a new dynamic hid device ID to this driver,
2597  * and causes the driver to probe for all devices again.
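 *
 * The expected input format is "bus vendor product [driver_data]" as hex
 * values, e.g. (hypothetical USB device):
 *
 *	echo "0003 1234 5678" > /sys/bus/hid/drivers/<driver>/new_id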
2598  */
2599 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2600 		size_t count)
2601 {
2602 	struct hid_driver *hdrv = to_hid_driver(drv);
2603 	struct hid_dynid *dynid;
2604 	__u32 bus, vendor, product;
2605 	unsigned long driver_data = 0;
2606 	int ret;
2607 
2608 	ret = sscanf(buf, "%x %x %x %lx",
2609 			&bus, &vendor, &product, &driver_data);
2610 	if (ret < 3)
2611 		return -EINVAL;
2612 
2613 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2614 	if (!dynid)
2615 		return -ENOMEM;
2616 
2617 	dynid->id.bus = bus;
2618 	dynid->id.group = HID_GROUP_ANY;
2619 	dynid->id.vendor = vendor;
2620 	dynid->id.product = product;
2621 	dynid->id.driver_data = driver_data;
2622 
2623 	spin_lock(&hdrv->dyn_lock);
2624 	list_add_tail(&dynid->list, &hdrv->dyn_list);
2625 	spin_unlock(&hdrv->dyn_lock);
2626 
2627 	ret = driver_attach(&hdrv->driver);
2628 
2629 	return ret ? : count;
2630 }
2631 static DRIVER_ATTR_WO(new_id);
2632 
2633 static struct attribute *hid_drv_attrs[] = {
2634 	&driver_attr_new_id.attr,
2635 	NULL,
2636 };
2637 ATTRIBUTE_GROUPS(hid_drv);
2638 
2639 static void hid_free_dynids(struct hid_driver *hdrv)
2640 {
2641 	struct hid_dynid *dynid, *n;
2642 
2643 	spin_lock(&hdrv->dyn_lock);
2644 	list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2645 		list_del(&dynid->list);
2646 		kfree(dynid);
2647 	}
2648 	spin_unlock(&hdrv->dyn_lock);
2649 }
2650 
2651 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2652 					     struct hid_driver *hdrv)
2653 {
2654 	struct hid_dynid *dynid;
2655 
2656 	spin_lock(&hdrv->dyn_lock);
2657 	list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2658 		if (hid_match_one_id(hdev, &dynid->id)) {
2659 			spin_unlock(&hdrv->dyn_lock);
2660 			return &dynid->id;
2661 		}
2662 	}
2663 	spin_unlock(&hdrv->dyn_lock);
2664 
2665 	return hid_match_id(hdev, hdrv->id_table);
2666 }
2667 EXPORT_SYMBOL_GPL(hid_match_device);
2668 
2669 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2670 {
2671 	struct hid_driver *hdrv = to_hid_driver(drv);
2672 	struct hid_device *hdev = to_hid_device(dev);
2673 
2674 	return hid_match_device(hdev, hdrv) != NULL;
2675 }
2676 
2677 /**
2678  * hid_compare_device_paths - check if both devices share the same path
2679  * @hdev_a: hid device
2680  * @hdev_b: hid device
2681  * @separator: char to use as separator
2682  *
2683  * Check if two devices share the same path up to the last occurrence of
2684  * the separator char. Both paths must exist (i.e., zero-length paths
2685  * don't match).
2686  */
2687 bool hid_compare_device_paths(struct hid_device *hdev_a,
2688 			      struct hid_device *hdev_b, char separator)
2689 {
2690 	int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2691 	int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2692 
2693 	if (n1 != n2 || n1 <= 0 || n2 <= 0)
2694 		return false;
2695 
2696 	return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2697 }
2698 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2699 
2700 static bool hid_check_device_match(struct hid_device *hdev,
2701 				   struct hid_driver *hdrv,
2702 				   const struct hid_device_id **id)
2703 {
2704 	*id = hid_match_device(hdev, hdrv);
2705 	if (!*id)
2706 		return false;
2707 
2708 	if (hdrv->match)
2709 		return hdrv->match(hdev, hid_ignore_special_drivers);
2710 
2711 	/*
2712 	 * hid-generic implements .match(), so we must be dealing with a
2713 	 * different HID driver here, and can simply check if
2714 	 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2715 	 * are set or not.
2716 	 */
2717 	return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2718 }
2719 
2720 static void hid_set_group(struct hid_device *hdev)
2721 {
2722 	int ret;
2723 
2724 	if (hid_ignore_special_drivers) {
2725 		hdev->group = HID_GROUP_GENERIC;
2726 	} else if (!hdev->group &&
2727 		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2728 		ret = hid_scan_report(hdev);
2729 		if (ret)
2730 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2731 	}
2732 }
2733 
2734 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2735 {
2736 	const struct hid_device_id *id;
2737 	int ret;
2738 
2739 	if (!hdev->bpf_rsize) {
2740 		/* we keep a reference to the currently scanned report descriptor */
2741 		const __u8  *original_rdesc = hdev->bpf_rdesc;
2742 
2743 		if (!original_rdesc)
2744 			original_rdesc = hdev->dev_rdesc;
2745 
2746 		/* in case a bpf program gets detached, we need to free the old one */
2747 		hid_free_bpf_rdesc(hdev);
2748 
2749 		/* keep this around so we know we called it once */
2750 		hdev->bpf_rsize = hdev->dev_rsize;
2751 
2752 		/* call_hid_bpf_rdesc_fixup will always return a valid pointer */
2753 		hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
2754 							   &hdev->bpf_rsize);
2755 
2756 		/* the report descriptor changed, we need to re-scan it */
2757 		if (original_rdesc != hdev->bpf_rdesc) {
2758 			hdev->group = 0;
2759 			hid_set_group(hdev);
2760 		}
2761 	}
2762 
2763 	if (!hid_check_device_match(hdev, hdrv, &id))
2764 		return -ENODEV;
2765 
2766 	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2767 	if (!hdev->devres_group_id)
2768 		return -ENOMEM;
2769 
2770 	/* reset the quirks that have been previously set */
2771 	hdev->quirks = hid_lookup_quirk(hdev);
2772 	hdev->driver = hdrv;
2773 
2774 	if (hdrv->probe) {
2775 		ret = hdrv->probe(hdev, id);
2776 	} else { /* default probe */
2777 		ret = hid_open_report(hdev);
2778 		if (!ret)
2779 			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2780 	}
2781 
2782 	/*
2783 	 * Note that we are not closing the devres group opened above so
2784 	 * even resources that were attached to the device after probe is
2785 	 * run are released when hid_device_remove() is executed. This is
2786 	 * needed as some drivers would allocate additional resources,
2787 	 * for example when updating firmware.
2788 	 */
2789 
2790 	if (ret) {
2791 		devres_release_group(&hdev->dev, hdev->devres_group_id);
2792 		hid_close_report(hdev);
2793 		hdev->driver = NULL;
2794 	}
2795 
2796 	return ret;
2797 }
2798 
2799 static int hid_device_probe(struct device *dev)
2800 {
2801 	struct hid_device *hdev = to_hid_device(dev);
2802 	struct hid_driver *hdrv = to_hid_driver(dev->driver);
2803 	int ret = 0;
2804 
2805 	if (down_interruptible(&hdev->driver_input_lock))
2806 		return -EINTR;
2807 
2808 	hdev->io_started = false;
2809 	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2810 
2811 	if (!hdev->driver)
2812 		ret = __hid_device_probe(hdev, hdrv);
2813 
2814 	if (!hdev->io_started)
2815 		up(&hdev->driver_input_lock);
2816 
2817 	return ret;
2818 }
2819 
2820 static void hid_device_remove(struct device *dev)
2821 {
2822 	struct hid_device *hdev = to_hid_device(dev);
2823 	struct hid_driver *hdrv;
2824 
2825 	down(&hdev->driver_input_lock);
2826 	hdev->io_started = false;
2827 
2828 	hdrv = hdev->driver;
2829 	if (hdrv) {
2830 		if (hdrv->remove)
2831 			hdrv->remove(hdev);
2832 		else /* default remove */
2833 			hid_hw_stop(hdev);
2834 
2835 		/* Release all devres resources allocated by the driver */
2836 		devres_release_group(&hdev->dev, hdev->devres_group_id);
2837 
2838 		hid_close_report(hdev);
2839 		hdev->driver = NULL;
2840 	}
2841 
2842 	if (!hdev->io_started)
2843 		up(&hdev->driver_input_lock);
2844 }
2845 
2846 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2847 			     char *buf)
2848 {
2849 	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2850 
2851 	return sysfs_emit(buf, "hid:b%04Xg%04Xv%08Xp%08X\n",
2852 			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2853 }
2854 static DEVICE_ATTR_RO(modalias);
2855 
2856 static struct attribute *hid_dev_attrs[] = {
2857 	&dev_attr_modalias.attr,
2858 	NULL,
2859 };
2860 static const struct bin_attribute *hid_dev_bin_attrs[] = {
2861 	&bin_attr_report_descriptor,
2862 	NULL
2863 };
2864 static const struct attribute_group hid_dev_group = {
2865 	.attrs = hid_dev_attrs,
2866 	.bin_attrs = hid_dev_bin_attrs,
2867 };
2868 __ATTRIBUTE_GROUPS(hid_dev);
2869 
2870 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2871 {
2872 	const struct hid_device *hdev = to_hid_device(dev);
2873 
2874 	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2875 			hdev->bus, hdev->vendor, hdev->product))
2876 		return -ENOMEM;
2877 
2878 	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2879 		return -ENOMEM;
2880 
2881 	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2882 		return -ENOMEM;
2883 
2884 	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2885 		return -ENOMEM;
2886 
2887 	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2888 			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
2889 		return -ENOMEM;
2890 
2891 	return 0;
2892 }
2893 
2894 const struct bus_type hid_bus_type = {
2895 	.name		= "hid",
2896 	.dev_groups	= hid_dev_groups,
2897 	.drv_groups	= hid_drv_groups,
2898 	.match		= hid_bus_match,
2899 	.probe		= hid_device_probe,
2900 	.remove		= hid_device_remove,
2901 	.uevent		= hid_uevent,
2902 };
2903 EXPORT_SYMBOL(hid_bus_type);
2904 
2905 int hid_add_device(struct hid_device *hdev)
2906 {
2907 	static atomic_t id = ATOMIC_INIT(0);
2908 	int ret;
2909 
2910 	if (WARN_ON(hdev->status & HID_STAT_ADDED))
2911 		return -EBUSY;
2912 
2913 	hdev->quirks = hid_lookup_quirk(hdev);
2914 
2915 	/* we need to kill them here, otherwise they will stay allocated,
2916 	 * waiting for a matching driver */
2917 	if (hid_ignore(hdev))
2918 		return -ENODEV;
2919 
2920 	/*
2921 	 * Check for the mandatory transport channel.
2922 	 */
2923 	 if (!hdev->ll_driver->raw_request) {
2924 		hid_err(hdev, "transport driver missing .raw_request()\n");
2925 		return -EINVAL;
2926 	 }
2927 
2928 	/*
2929 	 * Read the device report descriptor once and use it as a template
2930 	 * for the driver-specific modifications.
2931 	 */
2932 	ret = hdev->ll_driver->parse(hdev);
2933 	if (ret)
2934 		return ret;
2935 	if (!hdev->dev_rdesc)
2936 		return -ENODEV;
2937 
2938 	/*
2939 	 * Scan generic devices for group information
2940 	 */
2941 	hid_set_group(hdev);
2942 
2943 	hdev->id = atomic_inc_return(&id);
2944 
2945 	/* XXX hack, any other cleaner solution after the driver core
2946 	 * is converted to allow more than 20 bytes as the device name? */
2947 	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2948 		     hdev->vendor, hdev->product, hdev->id);
2949 
2950 	hid_debug_register(hdev, dev_name(&hdev->dev));
2951 	ret = device_add(&hdev->dev);
2952 	if (!ret)
2953 		hdev->status |= HID_STAT_ADDED;
2954 	else
2955 		hid_debug_unregister(hdev);
2956 
2957 	return ret;
2958 }
2959 EXPORT_SYMBOL_GPL(hid_add_device);
2960 
2961 /**
2962  * hid_allocate_device - allocate new hid device descriptor
2963  *
2964  * Allocate and initialize a hid device, so that hid_destroy_device() can
2965  * later be used to free it.
2966  *
2967  * A new hid_device pointer is returned on success, otherwise an
2968  * ERR_PTR-encoded error value.
2969  */
2970 struct hid_device *hid_allocate_device(void)
2971 {
2972 	struct hid_device *hdev;
2973 	int ret = -ENOMEM;
2974 
2975 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2976 	if (hdev == NULL)
2977 		return ERR_PTR(ret);
2978 
2979 	device_initialize(&hdev->dev);
2980 	hdev->dev.release = hid_device_release;
2981 	hdev->dev.bus = &hid_bus_type;
2982 	device_enable_async_suspend(&hdev->dev);
2983 
2984 	hid_close_report(hdev);
2985 
2986 	init_waitqueue_head(&hdev->debug_wait);
2987 	INIT_LIST_HEAD(&hdev->debug_list);
2988 	spin_lock_init(&hdev->debug_list_lock);
2989 	sema_init(&hdev->driver_input_lock, 1);
2990 	mutex_init(&hdev->ll_open_lock);
2991 	kref_init(&hdev->ref);
2992 
2993 	ret = hid_bpf_device_init(hdev);
2994 	if (ret)
2995 		goto out_err;
2996 
2997 	return hdev;
2998 
2999 out_err:
3000 	hid_destroy_device(hdev);
3001 	return ERR_PTR(ret);
3002 }
3003 EXPORT_SYMBOL_GPL(hid_allocate_device);
3004 
3005 static void hid_remove_device(struct hid_device *hdev)
3006 {
3007 	if (hdev->status & HID_STAT_ADDED) {
3008 		device_del(&hdev->dev);
3009 		hid_debug_unregister(hdev);
3010 		hdev->status &= ~HID_STAT_ADDED;
3011 	}
3012 	hid_free_bpf_rdesc(hdev);
3013 	kfree(hdev->dev_rdesc);
3014 	hdev->dev_rdesc = NULL;
3015 	hdev->dev_rsize = 0;
3016 	hdev->bpf_rsize = 0;
3017 }
3018 
3019 /**
3020  * hid_destroy_device - free previously allocated device
3021  *
3022  * @hdev: hid device
3023  *
3024  * If you allocated the hid_device through hid_allocate_device(), you
3025  * should always free it using this function.
3026  */
3027 void hid_destroy_device(struct hid_device *hdev)
3028 {
3029 	hid_bpf_destroy_device(hdev);
3030 	hid_remove_device(hdev);
3031 	put_device(&hdev->dev);
3032 }
3033 EXPORT_SYMBOL_GPL(hid_destroy_device);
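
/*
 * Hypothetical transport-driver lifecycle sketch (not part of this file):
 * allocate the device, fill in identification and the ll_driver, register
 * it, and destroy it on failure or removal. "my_ll_driver" and the IDs are
 * illustrative.
 *
 *	struct hid_device *hdev;
 *	int ret;
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *
 *	hdev->ll_driver = &my_ll_driver;
 *	hdev->bus = BUS_VIRTUAL;
 *	hdev->vendor = 0x1234;
 *	hdev->product = 0x5678;
 *
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);
 */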
3034 
3035 
3036 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
3037 {
3038 	struct hid_driver *hdrv = data;
3039 	struct hid_device *hdev = to_hid_device(dev);
3040 
3041 	if (hdev->driver == hdrv &&
3042 	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
3043 	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
3044 		return device_reprobe(dev);
3045 
3046 	return 0;
3047 }
3048 
3049 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3050 {
3051 	struct hid_driver *hdrv = to_hid_driver(drv);
3052 
3053 	if (hdrv->match) {
3054 		bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3055 				 __hid_bus_reprobe_drivers);
3056 	}
3057 
3058 	return 0;
3059 }
3060 
3061 static int __bus_removed_driver(struct device_driver *drv, void *data)
3062 {
3063 	return bus_rescan_devices(&hid_bus_type);
3064 }
3065 
3066 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3067 		const char *mod_name)
3068 {
3069 	int ret;
3070 
3071 	hdrv->driver.name = hdrv->name;
3072 	hdrv->driver.bus = &hid_bus_type;
3073 	hdrv->driver.owner = owner;
3074 	hdrv->driver.mod_name = mod_name;
3075 
3076 	INIT_LIST_HEAD(&hdrv->dyn_list);
3077 	spin_lock_init(&hdrv->dyn_lock);
3078 
3079 	ret = driver_register(&hdrv->driver);
3080 
3081 	if (ret == 0)
3082 		bus_for_each_drv(&hid_bus_type, NULL, NULL,
3083 				 __hid_bus_driver_added);
3084 
3085 	return ret;
3086 }
3087 EXPORT_SYMBOL_GPL(__hid_register_driver);
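
/*
 * Hypothetical HID driver registration sketch (not part of this file):
 * most drivers use the module_hid_driver() helper from <linux/hid.h>
 * instead of calling __hid_register_driver() directly. The device IDs are
 * illustrative.
 *
 *	static const struct hid_device_id my_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, my_devices);
 *
 *	static struct hid_driver my_driver = {
 *		.name = "my-hid-driver",
 *		.id_table = my_devices,
 *	};
 *	module_hid_driver(my_driver);
 */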
3088 
3089 void hid_unregister_driver(struct hid_driver *hdrv)
3090 {
3091 	driver_unregister(&hdrv->driver);
3092 	hid_free_dynids(hdrv);
3093 
3094 	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3095 }
3096 EXPORT_SYMBOL_GPL(hid_unregister_driver);
3097 
3098 int hid_check_keys_pressed(struct hid_device *hid)
3099 {
3100 	struct hid_input *hidinput;
3101 	int i;
3102 
3103 	if (!(hid->claimed & HID_CLAIMED_INPUT))
3104 		return 0;
3105 
3106 	list_for_each_entry(hidinput, &hid->inputs, list) {
3107 		for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3108 			if (hidinput->input->key[i])
3109 				return 1;
3110 	}
3111 
3112 	return 0;
3113 }
3114 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3115 
3116 #ifdef CONFIG_HID_BPF
3117 static const struct hid_ops __hid_ops = {
3118 	.hid_get_report = hid_get_report,
3119 	.hid_hw_raw_request = __hid_hw_raw_request,
3120 	.hid_hw_output_report = __hid_hw_output_report,
3121 	.hid_input_report = __hid_input_report,
3122 	.owner = THIS_MODULE,
3123 	.bus_type = &hid_bus_type,
3124 };
3125 #endif
3126 
3127 static int __init hid_init(void)
3128 {
3129 	int ret;
3130 
3131 	ret = bus_register(&hid_bus_type);
3132 	if (ret) {
3133 		pr_err("can't register hid bus\n");
3134 		goto err;
3135 	}
3136 
3137 #ifdef CONFIG_HID_BPF
3138 	hid_ops = &__hid_ops;
3139 #endif
3140 
3141 	ret = hidraw_init();
3142 	if (ret)
3143 		goto err_bus;
3144 
3145 	hid_debug_init();
3146 
3147 	return 0;
3148 err_bus:
3149 	bus_unregister(&hid_bus_type);
3150 err:
3151 	return ret;
3152 }
3153 
3154 static void __exit hid_exit(void)
3155 {
3156 #ifdef CONFIG_HID_BPF
3157 	hid_ops = NULL;
3158 #endif
3159 	hid_debug_exit();
3160 	hidraw_exit();
3161 	bus_unregister(&hid_bus_type);
3162 	hid_quirks_exit(HID_BUS_ANY);
3163 }
3164 
3165 module_init(hid_init);
3166 module_exit(hid_exit);
3167 
3168 MODULE_AUTHOR("Andreas Gal");
3169 MODULE_AUTHOR("Vojtech Pavlik");
3170 MODULE_AUTHOR("Jiri Kosina");
3171 MODULE_DESCRIPTION("HID support for Linux");
3172 MODULE_LICENSE("GPL");
3173