xref: /linux/drivers/hid/hid-core.c (revision e003ef2cb1de41edda508ea1fdb21974f9f18dfb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  HID support for Linux
4  *
5  *  Copyright (c) 1999 Andreas Gal
6  *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7  *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8  *  Copyright (c) 2006-2012 Jiri Kosina
9  */
10 
11 /*
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30 
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35 
36 #include "hid-ids.h"
37 
38 /*
39  * Version Information
40  */
41 
42 #define DRIVER_DESC "HID core driver"
43 
44 static int hid_ignore_special_drivers = 0;
45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
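/*
 * Usage note (illustrative, not from this file): loading with
 * "ignore_special_drivers=1", or booting a built-in HID core with
 * "hid.ignore_special_drivers=1", makes hid-generic handle every device
 * instead of any device-specific driver.
 */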
47 
48 /*
49  * Convert a signed n-bit integer to signed 32-bit integer.
50  */
51 
52 static s32 snto32(__u32 value, unsigned int n)
53 {
54 	if (!value || !n)
55 		return 0;
56 
57 	if (n > 32)
58 		n = 32;
59 
60 	return sign_extend32(value, n - 1);
61 }
62 
63 /*
64  * Convert a signed 32-bit integer to a signed n-bit integer.
65  */
66 
67 static u32 s32ton(__s32 value, unsigned int n)
68 {
69 	s32 a = value >> (n - 1);
70 
71 	if (a && a != -1)
72 		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
73 	return value & ((1 << n) - 1);
74 }
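/*
 * Worked examples (illustrative, not part of the driver): snto32(0xF, 4)
 * sign-extends bit 3 and yields -1, while snto32(0x7, 4) stays 7.
 * Going the other way, s32ton(-3, 8) returns 0xFD, and out-of-range
 * values saturate: s32ton(200, 8) gives 127 and s32ton(-200, 8) gives
 * 0x80 (-128 as an 8-bit value).
 */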
75 
76 /*
77  * Register a new report for a device.
78  */
79 
80 struct hid_report *hid_register_report(struct hid_device *device,
81 				       enum hid_report_type type, unsigned int id,
82 				       unsigned int application)
83 {
84 	struct hid_report_enum *report_enum = device->report_enum + type;
85 	struct hid_report *report;
86 
87 	if (id >= HID_MAX_IDS)
88 		return NULL;
89 	if (report_enum->report_id_hash[id])
90 		return report_enum->report_id_hash[id];
91 
92 	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
93 	if (!report)
94 		return NULL;
95 
96 	if (id != 0)
97 		report_enum->numbered = 1;
98 
99 	report->id = id;
100 	report->type = type;
101 	report->size = 0;
102 	report->device = device;
103 	report->application = application;
104 	report_enum->report_id_hash[id] = report;
105 
106 	list_add_tail(&report->list, &report_enum->report_list);
107 	INIT_LIST_HEAD(&report->field_entry_list);
108 
109 	return report;
110 }
111 EXPORT_SYMBOL_GPL(hid_register_report);
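/*
 * Illustrative sketch (assumption, not taken from this file): once a
 * report has been registered here, it can be found again either through
 * the per-type hash or by walking the list, e.g.:
 *
 *	struct hid_report_enum *re = &hdev->report_enum[HID_INPUT_REPORT];
 *	struct hid_report *report = re->report_id_hash[id];
 *
 *	list_for_each_entry(report, &re->report_list, list)
 *		dbg_hid("report %u, %u fields\n", report->id, report->maxfield);
 */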
112 
113 /*
114  * Register a new field for this report.
115  */
116 
117 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
118 {
119 	struct hid_field *field;
120 
121 	if (report->maxfield == HID_MAX_FIELDS) {
122 		hid_err(report->device, "too many fields in report\n");
123 		return NULL;
124 	}
125 
126 	field = kvzalloc((sizeof(struct hid_field) +
127 			  usages * sizeof(struct hid_usage) +
128 			  3 * usages * sizeof(unsigned int)), GFP_KERNEL);
129 	if (!field)
130 		return NULL;
131 
132 	field->index = report->maxfield++;
133 	report->field[field->index] = field;
134 	field->usage = (struct hid_usage *)(field + 1);
135 	field->value = (s32 *)(field->usage + usages);
136 	field->new_value = (s32 *)(field->value + usages);
137 	field->usages_priorities = (s32 *)(field->new_value + usages);
138 	field->report = report;
139 
140 	return field;
141 }
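/*
 * Layout of the single kvzalloc() block built above, shown for clarity:
 *
 *	[ struct hid_field | usages x struct hid_usage | usages x s32 value
 *	  | usages x s32 new_value | usages x s32 usages_priorities ]
 *
 * The three trailing s32 arrays are what the "3 * usages *
 * sizeof(unsigned int)" term in the allocation accounts for.
 */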
142 
143 /*
144  * Open a collection. The type/usage is pushed on the stack.
145  */
146 
147 static int open_collection(struct hid_parser *parser, unsigned type)
148 {
149 	struct hid_collection *collection;
150 	unsigned usage;
151 	int collection_index;
152 
153 	usage = parser->local.usage[0];
154 
155 	if (parser->collection_stack_ptr == parser->collection_stack_size) {
156 		unsigned int *collection_stack;
157 		unsigned int new_size = parser->collection_stack_size +
158 					HID_COLLECTION_STACK_SIZE;
159 
160 		collection_stack = krealloc(parser->collection_stack,
161 					    new_size * sizeof(unsigned int),
162 					    GFP_KERNEL);
163 		if (!collection_stack)
164 			return -ENOMEM;
165 
166 		parser->collection_stack = collection_stack;
167 		parser->collection_stack_size = new_size;
168 	}
169 
170 	if (parser->device->maxcollection == parser->device->collection_size) {
171 		collection = kmalloc(
172 				array3_size(sizeof(struct hid_collection),
173 					    parser->device->collection_size,
174 					    2),
175 				GFP_KERNEL);
176 		if (collection == NULL) {
177 			hid_err(parser->device, "failed to reallocate collection array\n");
178 			return -ENOMEM;
179 		}
180 		memcpy(collection, parser->device->collection,
181 			sizeof(struct hid_collection) *
182 			parser->device->collection_size);
183 		memset(collection + parser->device->collection_size, 0,
184 			sizeof(struct hid_collection) *
185 			parser->device->collection_size);
186 		kfree(parser->device->collection);
187 		parser->device->collection = collection;
188 		parser->device->collection_size *= 2;
189 	}
190 
191 	parser->collection_stack[parser->collection_stack_ptr++] =
192 		parser->device->maxcollection;
193 
194 	collection_index = parser->device->maxcollection++;
195 	collection = parser->device->collection + collection_index;
196 	collection->type = type;
197 	collection->usage = usage;
198 	collection->level = parser->collection_stack_ptr - 1;
199 	collection->parent_idx = (collection->level == 0) ? -1 :
200 		parser->collection_stack[collection->level - 1];
201 
202 	if (type == HID_COLLECTION_APPLICATION)
203 		parser->device->maxapplication++;
204 
205 	return 0;
206 }
207 
208 /*
209  * Close a collection.
210  */
211 
212 static int close_collection(struct hid_parser *parser)
213 {
214 	if (!parser->collection_stack_ptr) {
215 		hid_err(parser->device, "collection stack underflow\n");
216 		return -EINVAL;
217 	}
218 	parser->collection_stack_ptr--;
219 	return 0;
220 }
221 
222 /*
223  * Climb up the stack, search for the specified collection type
224  * and return the usage.
225  */
226 
227 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
228 {
229 	struct hid_collection *collection = parser->device->collection;
230 	int n;
231 
232 	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
233 		unsigned index = parser->collection_stack[n];
234 		if (collection[index].type == type)
235 			return collection[index].usage;
236 	}
237 	return 0; /* we know nothing about this usage type */
238 }
239 
240 /*
241  * Concatenate a usage that defines 16 bits or less with the
242  * currently defined usage page to form a 32-bit usage
243  */
244 
245 static void complete_usage(struct hid_parser *parser, unsigned int index)
246 {
247 	parser->local.usage[index] &= 0xFFFF;
248 	parser->local.usage[index] |=
249 		(parser->global.usage_page & 0xFFFF) << 16;
250 }
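/*
 * Example (illustrative): with Usage Page 0x0001 (Generic Desktop) in
 * effect, a short Usage item of 0x30 (X) is completed here into the
 * 32-bit usage 0x00010030, i.e. HID_GD_X.
 */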
251 
252 /*
253  * Add a usage to the temporary parser table.
254  */
255 
256 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
257 {
258 	if (parser->local.usage_index >= HID_MAX_USAGES) {
259 		hid_err(parser->device, "usage index exceeded\n");
260 		return -1;
261 	}
262 	parser->local.usage[parser->local.usage_index] = usage;
263 
264 	/*
265 	 * If Usage item only includes usage id, concatenate it with
266 	 * currently defined usage page
267 	 */
268 	if (size <= 2)
269 		complete_usage(parser, parser->local.usage_index);
270 
271 	parser->local.usage_size[parser->local.usage_index] = size;
272 	parser->local.collection_index[parser->local.usage_index] =
273 		parser->collection_stack_ptr ?
274 		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
275 	parser->local.usage_index++;
276 	return 0;
277 }
278 
279 /*
280  * Register a new field for this report.
281  */
282 
283 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
284 {
285 	struct hid_report *report;
286 	struct hid_field *field;
287 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
288 	unsigned int usages;
289 	unsigned int offset;
290 	unsigned int i;
291 	unsigned int application;
292 
293 	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
294 
295 	report = hid_register_report(parser->device, report_type,
296 				     parser->global.report_id, application);
297 	if (!report) {
298 		hid_err(parser->device, "hid_register_report failed\n");
299 		return -1;
300 	}
301 
302 	/* Handle both signed and unsigned cases properly */
303 	if ((parser->global.logical_minimum < 0 &&
304 		parser->global.logical_maximum <
305 		parser->global.logical_minimum) ||
306 		(parser->global.logical_minimum >= 0 &&
307 		(__u32)parser->global.logical_maximum <
308 		(__u32)parser->global.logical_minimum)) {
309 		dbg_hid("logical range invalid 0x%x 0x%x\n",
310 			parser->global.logical_minimum,
311 			parser->global.logical_maximum);
312 		return -1;
313 	}
314 
315 	offset = report->size;
316 	report->size += parser->global.report_size * parser->global.report_count;
317 
318 	if (parser->device->ll_driver->max_buffer_size)
319 		max_buffer_size = parser->device->ll_driver->max_buffer_size;
320 
321 	/* Total size check: Allow for possible report index byte */
322 	if (report->size > (max_buffer_size - 1) << 3) {
323 		hid_err(parser->device, "report is too long\n");
324 		return -1;
325 	}
326 
327 	if (!parser->local.usage_index) /* Ignore padding fields */
328 		return 0;
329 
330 	usages = max_t(unsigned, parser->local.usage_index,
331 				 parser->global.report_count);
332 
333 	field = hid_register_field(report, usages);
334 	if (!field)
335 		return 0;
336 
337 	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
338 	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
339 	field->application = application;
340 
341 	for (i = 0; i < usages; i++) {
342 		unsigned j = i;
343 		/* Duplicate the last usage we parsed if we have excess values */
344 		if (i >= parser->local.usage_index)
345 			j = parser->local.usage_index - 1;
346 		field->usage[i].hid = parser->local.usage[j];
347 		field->usage[i].collection_index =
348 			parser->local.collection_index[j];
349 		field->usage[i].usage_index = i;
350 		field->usage[i].resolution_multiplier = 1;
351 	}
352 
353 	field->maxusage = usages;
354 	field->flags = flags;
355 	field->report_offset = offset;
356 	field->report_type = report_type;
357 	field->report_size = parser->global.report_size;
358 	field->report_count = parser->global.report_count;
359 	field->logical_minimum = parser->global.logical_minimum;
360 	field->logical_maximum = parser->global.logical_maximum;
361 	field->physical_minimum = parser->global.physical_minimum;
362 	field->physical_maximum = parser->global.physical_maximum;
363 	field->unit_exponent = parser->global.unit_exponent;
364 	field->unit = parser->global.unit;
365 
366 	return 0;
367 }
368 
369 /*
370  * Read data value from item.
371  */
372 
373 static u32 item_udata(struct hid_item *item)
374 {
375 	switch (item->size) {
376 	case 1: return item->data.u8;
377 	case 2: return item->data.u16;
378 	case 4: return item->data.u32;
379 	}
380 	return 0;
381 }
382 
383 static s32 item_sdata(struct hid_item *item)
384 {
385 	switch (item->size) {
386 	case 1: return item->data.s8;
387 	case 2: return item->data.s16;
388 	case 4: return item->data.s32;
389 	}
390 	return 0;
391 }
392 
393 /*
394  * Process a global item.
395  */
396 
397 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
398 {
399 	__s32 raw_value;
400 	switch (item->tag) {
401 	case HID_GLOBAL_ITEM_TAG_PUSH:
402 
403 		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
404 			hid_err(parser->device, "global environment stack overflow\n");
405 			return -1;
406 		}
407 
408 		memcpy(parser->global_stack + parser->global_stack_ptr++,
409 			&parser->global, sizeof(struct hid_global));
410 		return 0;
411 
412 	case HID_GLOBAL_ITEM_TAG_POP:
413 
414 		if (!parser->global_stack_ptr) {
415 			hid_err(parser->device, "global environment stack underflow\n");
416 			return -1;
417 		}
418 
419 		memcpy(&parser->global, parser->global_stack +
420 			--parser->global_stack_ptr, sizeof(struct hid_global));
421 		return 0;
422 
423 	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
424 		parser->global.usage_page = item_udata(item);
425 		return 0;
426 
427 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
428 		parser->global.logical_minimum = item_sdata(item);
429 		return 0;
430 
431 	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
432 		if (parser->global.logical_minimum < 0)
433 			parser->global.logical_maximum = item_sdata(item);
434 		else
435 			parser->global.logical_maximum = item_udata(item);
436 		return 0;
437 
438 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
439 		parser->global.physical_minimum = item_sdata(item);
440 		return 0;
441 
442 	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
443 		if (parser->global.physical_minimum < 0)
444 			parser->global.physical_maximum = item_sdata(item);
445 		else
446 			parser->global.physical_maximum = item_udata(item);
447 		return 0;
448 
449 	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
450 		/* Many devices provide unit exponent as a two's complement
451 		 * nibble due to the common misunderstanding of HID
452 		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
453 		 * both this and the standard encoding. */
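		/*
		 * Example (illustrative): a raw nibble of 0x0C is taken as
		 * two's complement and becomes -4, while an item with bits
		 * set outside the low nibble (e.g. -3) is used as is.
		 */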
454 		raw_value = item_sdata(item);
455 		if (!(raw_value & 0xfffffff0))
456 			parser->global.unit_exponent = snto32(raw_value, 4);
457 		else
458 			parser->global.unit_exponent = raw_value;
459 		return 0;
460 
461 	case HID_GLOBAL_ITEM_TAG_UNIT:
462 		parser->global.unit = item_udata(item);
463 		return 0;
464 
465 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
466 		parser->global.report_size = item_udata(item);
467 		if (parser->global.report_size > 256) {
468 			hid_err(parser->device, "invalid report_size %d\n",
469 					parser->global.report_size);
470 			return -1;
471 		}
472 		return 0;
473 
474 	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
475 		parser->global.report_count = item_udata(item);
476 		if (parser->global.report_count > HID_MAX_USAGES) {
477 			hid_err(parser->device, "invalid report_count %d\n",
478 					parser->global.report_count);
479 			return -1;
480 		}
481 		return 0;
482 
483 	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
484 		parser->global.report_id = item_udata(item);
485 		if (parser->global.report_id == 0 ||
486 		    parser->global.report_id >= HID_MAX_IDS) {
487 			hid_err(parser->device, "report_id %u is invalid\n",
488 				parser->global.report_id);
489 			return -1;
490 		}
491 		return 0;
492 
493 	default:
494 		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
495 		return -1;
496 	}
497 }
498 
499 /*
500  * Process a local item.
501  */
502 
503 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
504 {
505 	__u32 data;
506 	unsigned n;
507 	__u32 count;
508 
509 	data = item_udata(item);
510 
511 	switch (item->tag) {
512 	case HID_LOCAL_ITEM_TAG_DELIMITER:
513 
514 		if (data) {
515 			/*
516 			 * We treat items before the first delimiter
517 			 * as global to all usage sets (branch 0).
518 			 * At the moment we process only these global
519 			 * items and the first delimiter set.
520 			 */
521 			if (parser->local.delimiter_depth != 0) {
522 				hid_err(parser->device, "nested delimiters\n");
523 				return -1;
524 			}
525 			parser->local.delimiter_depth++;
526 			parser->local.delimiter_branch++;
527 		} else {
528 			if (parser->local.delimiter_depth < 1) {
529 				hid_err(parser->device, "bogus close delimiter\n");
530 				return -1;
531 			}
532 			parser->local.delimiter_depth--;
533 		}
534 		return 0;
535 
536 	case HID_LOCAL_ITEM_TAG_USAGE:
537 
538 		if (parser->local.delimiter_branch > 1) {
539 			dbg_hid("alternative usage ignored\n");
540 			return 0;
541 		}
542 
543 		return hid_add_usage(parser, data, item->size);
544 
545 	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
546 
547 		if (parser->local.delimiter_branch > 1) {
548 			dbg_hid("alternative usage ignored\n");
549 			return 0;
550 		}
551 
552 		parser->local.usage_minimum = data;
553 		return 0;
554 
555 	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
556 
557 		if (parser->local.delimiter_branch > 1) {
558 			dbg_hid("alternative usage ignored\n");
559 			return 0;
560 		}
561 
562 		count = data - parser->local.usage_minimum;
563 		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
564 			/*
565 			 * We do not warn if the name is not set, we are
566 			 * actually pre-scanning the device.
567 			 */
568 			if (dev_name(&parser->device->dev))
569 				hid_warn(parser->device,
570 					 "ignoring exceeding usage max\n");
571 			data = HID_MAX_USAGES - parser->local.usage_index +
572 				parser->local.usage_minimum - 1;
573 			if (data <= 0) {
574 				hid_err(parser->device,
575 					"no more usage index available\n");
576 				return -1;
577 			}
578 		}
579 
580 		for (n = parser->local.usage_minimum; n <= data; n++)
581 			if (hid_add_usage(parser, n, item->size)) {
582 				dbg_hid("hid_add_usage failed\n");
583 				return -1;
584 			}
585 		return 0;
586 
587 	default:
588 
589 		dbg_hid("unknown local item tag 0x%x\n", item->tag);
590 		return 0;
591 	}
592 	return 0;
593 }
594 
595 /*
596  * Concatenate Usage Pages into Usages where relevant:
597  * As per specification, 6.2.2.8: "When the parser encounters a main item it
598  * concatenates the last declared Usage Page with a Usage to form a complete
599  * usage value."
600  */
601 
602 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
603 {
604 	int i;
605 	unsigned int usage_page;
606 	unsigned int current_page;
607 
608 	if (!parser->local.usage_index)
609 		return;
610 
611 	usage_page = parser->global.usage_page;
612 
613 	/*
614 	 * Concatenate usage page again only if last declared Usage Page
615 	 * has not been already used in previous usages concatenation
616 	 */
617 	for (i = parser->local.usage_index - 1; i >= 0; i--) {
618 		if (parser->local.usage_size[i] > 2)
619 			/* Ignore extended usages */
620 			continue;
621 
622 		current_page = parser->local.usage[i] >> 16;
623 		if (current_page == usage_page)
624 			break;
625 
626 		complete_usage(parser, i);
627 	}
628 }
629 
630 /*
631  * Process a main item.
632  */
633 
634 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
635 {
636 	__u32 data;
637 	int ret;
638 
639 	hid_concatenate_last_usage_page(parser);
640 
641 	data = item_udata(item);
642 
643 	switch (item->tag) {
644 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
645 		ret = open_collection(parser, data & 0xff);
646 		break;
647 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
648 		ret = close_collection(parser);
649 		break;
650 	case HID_MAIN_ITEM_TAG_INPUT:
651 		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
652 		break;
653 	case HID_MAIN_ITEM_TAG_OUTPUT:
654 		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
655 		break;
656 	case HID_MAIN_ITEM_TAG_FEATURE:
657 		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
658 		break;
659 	default:
660 		if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
661 			item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
662 			hid_warn(parser->device, "reserved main item tag 0x%x\n", item->tag);
663 		else
664 			hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
665 		ret = 0;
666 	}
667 
668 	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */
669 
670 	return ret;
671 }
672 
673 /*
674  * Process a reserved item.
675  */
676 
677 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
678 {
679 	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
680 	return 0;
681 }
682 
683 /*
684  * Free a report and all registered fields. The field->usage and
685  * field->value tables are allocated behind the field, so we only
686  * need to free(field) itself.
687  */
688 
689 static void hid_free_report(struct hid_report *report)
690 {
691 	unsigned n;
692 
693 	kfree(report->field_entries);
694 
695 	for (n = 0; n < report->maxfield; n++)
696 		kvfree(report->field[n]);
697 	kfree(report);
698 }
699 
700 /*
701  * Close report. This function returns the device
702  * state to the point prior to hid_open_report().
703  */
704 static void hid_close_report(struct hid_device *device)
705 {
706 	unsigned i, j;
707 
708 	for (i = 0; i < HID_REPORT_TYPES; i++) {
709 		struct hid_report_enum *report_enum = device->report_enum + i;
710 
711 		for (j = 0; j < HID_MAX_IDS; j++) {
712 			struct hid_report *report = report_enum->report_id_hash[j];
713 			if (report)
714 				hid_free_report(report);
715 		}
716 		memset(report_enum, 0, sizeof(*report_enum));
717 		INIT_LIST_HEAD(&report_enum->report_list);
718 	}
719 
720 	/*
721 	 * If the HID driver had a report_fixup() callback, dev->rdesc
722 	 * will be allocated by hid-core and needs to be freed.
723 	 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
724 	 * which cases it'll be freed later on device removal or destroy.
725 	 */
726 	if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
727 		kfree(device->rdesc);
728 	device->rdesc = NULL;
729 	device->rsize = 0;
730 
731 	kfree(device->collection);
732 	device->collection = NULL;
733 	device->collection_size = 0;
734 	device->maxcollection = 0;
735 	device->maxapplication = 0;
736 
737 	device->status &= ~HID_STAT_PARSED;
738 }
739 
740 static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
741 {
742 	/* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
743 	if (hdev->bpf_rdesc != hdev->dev_rdesc)
744 		kfree(hdev->bpf_rdesc);
745 	hdev->bpf_rdesc = NULL;
746 }
747 
748 /*
749  * Free a device structure, all reports, and all fields.
750  */
751 
752 void hiddev_free(struct kref *ref)
753 {
754 	struct hid_device *hid = container_of(ref, struct hid_device, ref);
755 
756 	hid_close_report(hid);
757 	hid_free_bpf_rdesc(hid);
758 	kfree(hid->dev_rdesc);
759 	kfree(hid);
760 }
761 
762 static void hid_device_release(struct device *dev)
763 {
764 	struct hid_device *hid = to_hid_device(dev);
765 
766 	kref_put(&hid->ref, hiddev_free);
767 }
768 
769 /*
770  * Fetch a report description item from the data stream. We support long
771  * items, though they are not used yet.
772  */
773 
774 static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
775 {
776 	u8 b;
777 
778 	if ((end - start) <= 0)
779 		return NULL;
780 
781 	b = *start++;
782 
783 	item->type = (b >> 2) & 3;
784 	item->tag  = (b >> 4) & 15;
785 
786 	if (item->tag == HID_ITEM_TAG_LONG) {
787 
788 		item->format = HID_ITEM_FORMAT_LONG;
789 
790 		if ((end - start) < 2)
791 			return NULL;
792 
793 		item->size = *start++;
794 		item->tag  = *start++;
795 
796 		if ((end - start) < item->size)
797 			return NULL;
798 
799 		item->data.longdata = start;
800 		start += item->size;
801 		return start;
802 	}
803 
804 	item->format = HID_ITEM_FORMAT_SHORT;
805 	item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */
806 
807 	if (end - start < item->size)
808 		return NULL;
809 
810 	switch (item->size) {
811 	case 0:
812 		break;
813 
814 	case 1:
815 		item->data.u8 = *start;
816 		break;
817 
818 	case 2:
819 		item->data.u16 = get_unaligned_le16(start);
820 		break;
821 
822 	case 4:
823 		item->data.u32 = get_unaligned_le32(start);
824 		break;
825 	}
826 
827 	return start + item->size;
828 }
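/*
 * Example (illustrative): the short item bytes 0x95 0x02 ("Report
 * Count (2)") decode as size = BIT(0x95 & 3) >> 1 = 1, type =
 * (0x95 >> 2) & 3 = 1 (global), tag = (0x95 >> 4) & 15 = 9, and
 * item->data.u8 == 0x02.
 */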
829 
830 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
831 {
832 	struct hid_device *hid = parser->device;
833 
834 	if (usage == HID_DG_CONTACTID)
835 		hid->group = HID_GROUP_MULTITOUCH;
836 }
837 
838 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
839 {
840 	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
841 	    parser->global.report_size == 8)
842 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
843 
844 	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
845 	    parser->global.report_size == 8)
846 		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
847 }
848 
849 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
850 {
851 	struct hid_device *hid = parser->device;
852 	int i;
853 
854 	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
855 	    (type == HID_COLLECTION_PHYSICAL ||
856 	     type == HID_COLLECTION_APPLICATION))
857 		hid->group = HID_GROUP_SENSOR_HUB;
858 
859 	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
860 	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
861 	    hid->group == HID_GROUP_MULTITOUCH)
862 		hid->group = HID_GROUP_GENERIC;
863 
864 	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
865 		for (i = 0; i < parser->local.usage_index; i++)
866 			if (parser->local.usage[i] == HID_GD_POINTER)
867 				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
868 
869 	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
870 		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
871 
872 	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
873 		for (i = 0; i < parser->local.usage_index; i++)
874 			if (parser->local.usage[i] ==
875 					(HID_UP_GOOGLEVENDOR | 0x0001))
876 				parser->device->group =
877 					HID_GROUP_VIVALDI;
878 }
879 
880 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
881 {
882 	__u32 data;
883 	int i;
884 
885 	hid_concatenate_last_usage_page(parser);
886 
887 	data = item_udata(item);
888 
889 	switch (item->tag) {
890 	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
891 		hid_scan_collection(parser, data & 0xff);
892 		break;
893 	case HID_MAIN_ITEM_TAG_END_COLLECTION:
894 		break;
895 	case HID_MAIN_ITEM_TAG_INPUT:
896 		/* ignore constant inputs, they will be ignored by hid-input */
897 		if (data & HID_MAIN_ITEM_CONSTANT)
898 			break;
899 		for (i = 0; i < parser->local.usage_index; i++)
900 			hid_scan_input_usage(parser, parser->local.usage[i]);
901 		break;
902 	case HID_MAIN_ITEM_TAG_OUTPUT:
903 		break;
904 	case HID_MAIN_ITEM_TAG_FEATURE:
905 		for (i = 0; i < parser->local.usage_index; i++)
906 			hid_scan_feature_usage(parser, parser->local.usage[i]);
907 		break;
908 	}
909 
910 	/* Reset the local parser environment */
911 	memset(&parser->local, 0, sizeof(parser->local));
912 
913 	return 0;
914 }
915 
916 /*
917  * Scan a report descriptor before the device is added to the bus.
918  * Sets device groups and other properties that determine what driver
919  * to load.
920  */
921 static int hid_scan_report(struct hid_device *hid)
922 {
923 	struct hid_parser *parser;
924 	struct hid_item item;
925 	const __u8 *start = hid->dev_rdesc;
926 	const __u8 *end = start + hid->dev_rsize;
927 	static int (*dispatch_type[])(struct hid_parser *parser,
928 				      struct hid_item *item) = {
929 		hid_scan_main,
930 		hid_parser_global,
931 		hid_parser_local,
932 		hid_parser_reserved
933 	};
934 
935 	parser = vzalloc(sizeof(struct hid_parser));
936 	if (!parser)
937 		return -ENOMEM;
938 
939 	parser->device = hid;
940 	hid->group = HID_GROUP_GENERIC;
941 
942 	/*
943 	 * The parsing is simpler than the one in hid_open_report() as we should
944 	 * be robust against hid errors. Those errors will be raised by
945 	 * hid_open_report() anyway.
946 	 */
947 	while ((start = fetch_item(start, end, &item)) != NULL)
948 		dispatch_type[item.type](parser, &item);
949 
950 	/*
951 	 * Handle special flags set during scanning.
952 	 */
953 	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
954 	    (hid->group == HID_GROUP_MULTITOUCH))
955 		hid->group = HID_GROUP_MULTITOUCH_WIN_8;
956 
957 	/*
958 	 * Vendor specific handlings
959 	 */
960 	switch (hid->vendor) {
961 	case USB_VENDOR_ID_WACOM:
962 		hid->group = HID_GROUP_WACOM;
963 		break;
964 	case USB_VENDOR_ID_SYNAPTICS:
965 		if (hid->group == HID_GROUP_GENERIC)
966 			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
967 			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
968 				/*
969 				 * hid-rmi should take care of them,
970 				 * not hid-generic
971 				 */
972 				hid->group = HID_GROUP_RMI;
973 		break;
974 	}
975 
976 	kfree(parser->collection_stack);
977 	vfree(parser);
978 	return 0;
979 }
980 
981 /**
982  * hid_parse_report - parse device report
983  *
984  * @hid: hid device
985  * @start: report descriptor start
986  * @size: report descriptor size
987  *
988  * Allocate the device report as read by the bus driver. This function should
989  * only be called from parse() in ll drivers.
990  */
991 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
992 {
993 	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
994 	if (!hid->dev_rdesc)
995 		return -ENOMEM;
996 	hid->dev_rsize = size;
997 	return 0;
998 }
999 EXPORT_SYMBOL_GPL(hid_parse_report);
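/*
 * Illustrative sketch (assumption, not from this file): a transport
 * driver calls this from its ->parse() callback once it has fetched the
 * raw descriptor from the hardware; my_rdesc and my_rdesc_len are
 * hypothetical placeholders for that descriptor:
 *
 *	static int my_ll_parse(struct hid_device *hid)
 *	{
 *		return hid_parse_report(hid, my_rdesc, my_rdesc_len);
 *	}
 */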
1000 
1001 static const char * const hid_report_names[] = {
1002 	"HID_INPUT_REPORT",
1003 	"HID_OUTPUT_REPORT",
1004 	"HID_FEATURE_REPORT",
1005 };
1006 /**
1007  * hid_validate_values - validate existing device report's value indexes
1008  *
1009  * @hid: hid device
1010  * @type: which report type to examine
1011  * @id: which report ID to examine (0 for first)
1012  * @field_index: which report field to examine
1013  * @report_counts: expected number of values
1014  *
1015  * Validate the number of values in a given field of a given report, after
1016  * parsing.
1017  */
1018 struct hid_report *hid_validate_values(struct hid_device *hid,
1019 				       enum hid_report_type type, unsigned int id,
1020 				       unsigned int field_index,
1021 				       unsigned int report_counts)
1022 {
1023 	struct hid_report *report;
1024 
1025 	if (type > HID_FEATURE_REPORT) {
1026 		hid_err(hid, "invalid HID report type %u\n", type);
1027 		return NULL;
1028 	}
1029 
1030 	if (id >= HID_MAX_IDS) {
1031 		hid_err(hid, "invalid HID report id %u\n", id);
1032 		return NULL;
1033 	}
1034 
1035 	/*
1036 	 * Explicitly not using hid_get_report() here since it depends on
1037 	 * ->numbered being checked, which may not always be the case when
1038 	 * drivers go to access report values.
1039 	 */
1040 	if (id == 0) {
1041 		/*
1042 		 * Validating on id 0 means we should examine the first
1043 		 * report in the list.
1044 		 */
1045 		report = list_first_entry_or_null(
1046 				&hid->report_enum[type].report_list,
1047 				struct hid_report, list);
1048 	} else {
1049 		report = hid->report_enum[type].report_id_hash[id];
1050 	}
1051 	if (!report) {
1052 		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1053 		return NULL;
1054 	}
1055 	if (report->maxfield <= field_index) {
1056 		hid_err(hid, "not enough fields in %s %u\n",
1057 			hid_report_names[type], id);
1058 		return NULL;
1059 	}
1060 	if (report->field[field_index]->report_count < report_counts) {
1061 		hid_err(hid, "not enough values in %s %u field %u\n",
1062 			hid_report_names[type], id, field_index);
1063 		return NULL;
1064 	}
1065 	return report;
1066 }
1067 EXPORT_SYMBOL_GPL(hid_validate_values);
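/*
 * Illustrative sketch (assumption): a driver that pokes report values
 * directly, e.g. a force-feedback driver, typically validates the report
 * layout in probe() before ever touching it:
 *
 *	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
 *	if (!report)
 *		return -ENODEV;
 *	report->field[0]->value[0] = 0x51;	// safe, 7 values were checked
 */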
1068 
1069 static int hid_calculate_multiplier(struct hid_device *hid,
1070 				     struct hid_field *multiplier)
1071 {
1072 	int m;
1073 	__s32 v = *multiplier->value;
1074 	__s32 lmin = multiplier->logical_minimum;
1075 	__s32 lmax = multiplier->logical_maximum;
1076 	__s32 pmin = multiplier->physical_minimum;
1077 	__s32 pmax = multiplier->physical_maximum;
1078 
1079 	/*
1080 	 * "Because OS implementations will generally divide the control's
1081 	 * reported count by the Effective Resolution Multiplier, designers
1082 	 * should take care not to establish a potential Effective
1083 	 * Resolution Multiplier of zero."
1084 	 * HID Usage Table, v1.12, Section 4.3.1, p31
1085 	 */
1086 	if (lmax - lmin == 0)
1087 		return 1;
1088 	/*
1089 	 * Handling the unit exponent is left as an exercise to whoever
1090 	 * finds a device where that exponent is not 0.
1091 	 */
1092 	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1093 	if (unlikely(multiplier->unit_exponent != 0)) {
1094 		hid_warn(hid,
1095 			 "unsupported Resolution Multiplier unit exponent %d\n",
1096 			 multiplier->unit_exponent);
1097 	}
1098 
1099 	/* There are no devices with an effective multiplier > 255 */
1100 	if (unlikely(m == 0 || m > 255 || m < -255)) {
1101 		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1102 		m = 1;
1103 	}
1104 
1105 	return m;
1106 }
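/*
 * Worked example (illustrative): a typical Resolution Multiplier feature
 * declares Logical Minimum/Maximum 0..1 and Physical Minimum/Maximum
 * 1..8. With the reported value at 1 the formula above yields
 * (1 - 0) / (1 - 0) * (8 - 1) + 1 = 8; with the value at 0 it yields 1.
 */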
1107 
1108 static void hid_apply_multiplier_to_field(struct hid_device *hid,
1109 					  struct hid_field *field,
1110 					  struct hid_collection *multiplier_collection,
1111 					  int effective_multiplier)
1112 {
1113 	struct hid_collection *collection;
1114 	struct hid_usage *usage;
1115 	int i;
1116 
1117 	/*
1118 	 * If multiplier_collection is NULL, the multiplier applies
1119 	 * to all fields in the report.
1120 	 * Otherwise, it is the Logical Collection the multiplier applies to
1121 	 * but our field may be in a subcollection of that collection.
1122 	 */
1123 	for (i = 0; i < field->maxusage; i++) {
1124 		usage = &field->usage[i];
1125 
1126 		collection = &hid->collection[usage->collection_index];
1127 		while (collection->parent_idx != -1 &&
1128 		       collection != multiplier_collection)
1129 			collection = &hid->collection[collection->parent_idx];
1130 
1131 		if (collection->parent_idx != -1 ||
1132 		    multiplier_collection == NULL)
1133 			usage->resolution_multiplier = effective_multiplier;
1134 
1135 	}
1136 }
1137 
1138 static void hid_apply_multiplier(struct hid_device *hid,
1139 				 struct hid_field *multiplier)
1140 {
1141 	struct hid_report_enum *rep_enum;
1142 	struct hid_report *rep;
1143 	struct hid_field *field;
1144 	struct hid_collection *multiplier_collection;
1145 	int effective_multiplier;
1146 	int i;
1147 
1148 	/*
1149 	 * "The Resolution Multiplier control must be contained in the same
1150 	 * Logical Collection as the control(s) to which it is to be applied.
1151 	 * If no Resolution Multiplier is defined, then the Resolution
1152 	 * Multiplier defaults to 1.  If more than one control exists in a
1153 	 * Logical Collection, the Resolution Multiplier is associated with
1154 	 * all controls in the collection. If no Logical Collection is
1155 	 * defined, the Resolution Multiplier is associated with all
1156 	 * controls in the report."
1157 	 * HID Usage Table, v1.12, Section 4.3.1, p30
1158 	 *
1159 	 * Thus, search from the current collection upwards until we find a
1160 	 * logical collection. Then search all fields for that same parent
1161 	 * collection. Those are the fields the multiplier applies to.
1162 	 *
1163 	 * If we have more than one multiplier, it will overwrite the
1164 	 * applicable fields later.
1165 	 */
1166 	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1167 	while (multiplier_collection->parent_idx != -1 &&
1168 	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
1169 		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1170 	if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
1171 		multiplier_collection = NULL;
1172 
1173 	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1174 
1175 	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1176 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1177 		for (i = 0; i < rep->maxfield; i++) {
1178 			field = rep->field[i];
1179 			hid_apply_multiplier_to_field(hid, field,
1180 						      multiplier_collection,
1181 						      effective_multiplier);
1182 		}
1183 	}
1184 }
1185 
1186 /*
1187  * hid_setup_resolution_multiplier - set up all resolution multipliers
1188  *
1189  * @hid: hid device
1190  *
1191  * Search for all Resolution Multiplier Feature Reports and apply their
1192  * value to all matching Input items. This only updates the internal struct
1193  * fields.
1194  *
1195  * The Resolution Multiplier is applied by the hardware. If the multiplier
1196  * is anything other than 1, the hardware will send pre-multiplied events
1197  * so that the same physical interaction generates an accumulated
1198  *	accumulated_value = value * multiplier
1199  * This may be achieved by sending
1200  * - "value * multiplier" for each event, or
1201  * - "value" but "multiplier" times as frequently, or
1202  * - a combination of the above
1203  * The only guarantee is that the same physical interaction always generates
1204  * an accumulated 'value * multiplier'.
1205  *
1206  * This function must be called before any event processing and after
1207  * any SetRequest to the Resolution Multiplier.
1208  */
1209 void hid_setup_resolution_multiplier(struct hid_device *hid)
1210 {
1211 	struct hid_report_enum *rep_enum;
1212 	struct hid_report *rep;
1213 	struct hid_usage *usage;
1214 	int i, j;
1215 
1216 	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1217 	list_for_each_entry(rep, &rep_enum->report_list, list) {
1218 		for (i = 0; i < rep->maxfield; i++) {
1219 			/* Ignore if report count is out of bounds. */
1220 			if (rep->field[i]->report_count < 1)
1221 				continue;
1222 
1223 			for (j = 0; j < rep->field[i]->maxusage; j++) {
1224 				usage = &rep->field[i]->usage[j];
1225 				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1226 					hid_apply_multiplier(hid,
1227 							     rep->field[i]);
1228 			}
1229 		}
1230 	}
1231 }
1232 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1233 
1234 /**
1235  * hid_open_report - open a driver-specific device report
1236  *
1237  * @device: hid device
1238  *
1239  * Parse a report description into a hid_device structure. Reports are
1240  * enumerated, fields are attached to these reports.
1241  * 0 returned on success, otherwise nonzero error value.
1242  *
1243  * This function (or the equivalent hid_parse() macro) should only be
1244  * called from probe() in drivers, before starting the device.
1245  */
1246 int hid_open_report(struct hid_device *device)
1247 {
1248 	struct hid_parser *parser;
1249 	struct hid_item item;
1250 	unsigned int size;
1251 	const __u8 *start;
1252 	const __u8 *end;
1253 	const __u8 *next;
1254 	int ret;
1255 	int i;
1256 	static int (*dispatch_type[])(struct hid_parser *parser,
1257 				      struct hid_item *item) = {
1258 		hid_parser_main,
1259 		hid_parser_global,
1260 		hid_parser_local,
1261 		hid_parser_reserved
1262 	};
1263 
1264 	if (WARN_ON(device->status & HID_STAT_PARSED))
1265 		return -EBUSY;
1266 
1267 	start = device->bpf_rdesc;
1268 	if (WARN_ON(!start))
1269 		return -ENODEV;
1270 	size = device->bpf_rsize;
1271 
1272 	if (device->driver->report_fixup) {
1273 		/*
1274 		 * device->driver->report_fixup() needs to work
1275 		 * on a copy of our report descriptor so it can
1276 		 * change it.
1277 		 */
1278 		__u8 *buf = kmemdup(start, size, GFP_KERNEL);
1279 
1280 		if (buf == NULL)
1281 			return -ENOMEM;
1282 
1283 		start = device->driver->report_fixup(device, buf, &size);
1284 
1285 		/*
1286 		 * The second kmemdup is required in case report_fixup() returns
1287 		 * static read-only memory, but we have no idea if that memory
1288 		 * needs to be cleaned up or not at the end.
1289 		 */
1290 		start = kmemdup(start, size, GFP_KERNEL);
1291 		kfree(buf);
1292 		if (start == NULL)
1293 			return -ENOMEM;
1294 	}
1295 
1296 	device->rdesc = start;
1297 	device->rsize = size;
1298 
1299 	parser = vzalloc(sizeof(struct hid_parser));
1300 	if (!parser) {
1301 		ret = -ENOMEM;
1302 		goto alloc_err;
1303 	}
1304 
1305 	parser->device = device;
1306 
1307 	end = start + size;
1308 
1309 	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1310 				     sizeof(struct hid_collection), GFP_KERNEL);
1311 	if (!device->collection) {
1312 		ret = -ENOMEM;
1313 		goto err;
1314 	}
1315 	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1316 	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1317 		device->collection[i].parent_idx = -1;
1318 
1319 	ret = -EINVAL;
1320 	while ((next = fetch_item(start, end, &item)) != NULL) {
1321 		start = next;
1322 
1323 		if (item.format != HID_ITEM_FORMAT_SHORT) {
1324 			hid_err(device, "unexpected long global item\n");
1325 			goto err;
1326 		}
1327 
1328 		if (dispatch_type[item.type](parser, &item)) {
1329 			hid_err(device, "item %u %u %u %u parsing failed\n",
1330 				item.format, (unsigned)item.size,
1331 				(unsigned)item.type, (unsigned)item.tag);
1332 			goto err;
1333 		}
1334 
1335 		if (start == end) {
1336 			if (parser->collection_stack_ptr) {
1337 				hid_err(device, "unbalanced collection at end of report description\n");
1338 				goto err;
1339 			}
1340 			if (parser->local.delimiter_depth) {
1341 				hid_err(device, "unbalanced delimiter at end of report description\n");
1342 				goto err;
1343 			}
1344 
1345 			/*
1346 			 * fetch initial values in case the device's
1347 			 * default multiplier isn't the recommended 1
1348 			 */
1349 			hid_setup_resolution_multiplier(device);
1350 
1351 			kfree(parser->collection_stack);
1352 			vfree(parser);
1353 			device->status |= HID_STAT_PARSED;
1354 
1355 			return 0;
1356 		}
1357 	}
1358 
1359 	hid_err(device, "item fetching failed at offset %u/%u\n",
1360 		size - (unsigned int)(end - start), size);
1361 err:
1362 	kfree(parser->collection_stack);
1363 alloc_err:
1364 	vfree(parser);
1365 	hid_close_report(device);
1366 	return ret;
1367 }
1368 EXPORT_SYMBOL_GPL(hid_open_report);
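/*
 * Illustrative sketch (assumption, not from this file): a HID driver's
 * probe() normally runs this through the hid_parse() wrapper before
 * starting the device:
 *
 *	static int my_probe(struct hid_device *hdev, const struct hid_device_id *id)
 *	{
 *		int ret = hid_parse(hdev);	// calls hid_open_report()
 *
 *		if (ret)
 *			return ret;
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 */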
1369 
1370 /*
1371  * Extract/implement a data field from/to a little endian report (bit array).
1372  *
1373  * Code sort-of follows HID spec:
1374  *     http://www.usb.org/developers/hidpage/HID1_11.pdf
1375  *
1376  * While the USB HID spec allows unlimited length bit fields in "report
1377  * descriptors", most devices never use more than 16 bits.
1378  * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1379  * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1380  */
1381 
1382 static u32 __extract(u8 *report, unsigned offset, int n)
1383 {
1384 	unsigned int idx = offset / 8;
1385 	unsigned int bit_nr = 0;
1386 	unsigned int bit_shift = offset % 8;
1387 	int bits_to_copy = 8 - bit_shift;
1388 	u32 value = 0;
1389 	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1390 
1391 	while (n > 0) {
1392 		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1393 		n -= bits_to_copy;
1394 		bit_nr += bits_to_copy;
1395 		bits_to_copy = 8;
1396 		bit_shift = 0;
1397 		idx++;
1398 	}
1399 
1400 	return value & mask;
1401 }
1402 
1403 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1404 			unsigned offset, unsigned n)
1405 {
1406 	if (n > 32) {
1407 		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1408 			      __func__, n, current->comm);
1409 		n = 32;
1410 	}
1411 
1412 	return __extract(report, offset, n);
1413 }
1414 EXPORT_SYMBOL_GPL(hid_field_extract);
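/*
 * Example (illustrative): with report bytes { 0x34, 0x12 } (the little
 * endian 16-bit value 0x1234), hid_field_extract(hid, report, 4, 8)
 * returns 0x23, i.e. bits 4..11 of the bit stream.
 */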
1415 
1416 /*
1417  * "implement" : set bits in a little endian bit stream.
1418  * Same concepts as "extract" (see comments above).
1419  * The data mangled in the bit stream remains in little endian
1420  * order the whole time. It makes more sense to talk about
1421  * endianness of register values by considering a register
1422  * a "cached" copy of the little endian bit stream.
1423  */
1424 
1425 static void __implement(u8 *report, unsigned offset, int n, u32 value)
1426 {
1427 	unsigned int idx = offset / 8;
1428 	unsigned int bit_shift = offset % 8;
1429 	int bits_to_set = 8 - bit_shift;
1430 
1431 	while (n - bits_to_set >= 0) {
1432 		report[idx] &= ~(0xff << bit_shift);
1433 		report[idx] |= value << bit_shift;
1434 		value >>= bits_to_set;
1435 		n -= bits_to_set;
1436 		bits_to_set = 8;
1437 		bit_shift = 0;
1438 		idx++;
1439 	}
1440 
1441 	/* write any remaining bits (less than a full byte) */
1442 	if (n) {
1443 		u8 bit_mask = ((1U << n) - 1);
1444 		report[idx] &= ~(bit_mask << bit_shift);
1445 		report[idx] |= value << bit_shift;
1446 	}
1447 }
1448 
1449 static void implement(const struct hid_device *hid, u8 *report,
1450 		      unsigned offset, unsigned n, u32 value)
1451 {
1452 	if (unlikely(n > 32)) {
1453 		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1454 			 __func__, n, current->comm);
1455 		n = 32;
1456 	} else if (n < 32) {
1457 		u32 m = (1U << n) - 1;
1458 
1459 		if (unlikely(value > m)) {
1460 			hid_warn(hid,
1461 				 "%s() called with too large value %d (n: %d)! (%s)\n",
1462 				 __func__, value, n, current->comm);
1463 			value &= m;
1464 		}
1465 	}
1466 
1467 	__implement(report, offset, n, value);
1468 }
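/*
 * Example (illustrative): implement(hid, report, 4, 8, 0x23) writes the
 * value back into bits 4..11, the inverse of the extraction example
 * above. A value that does not fit, e.g. 0x123 with n == 8, triggers the
 * warning and is masked down to 0x23 before being written.
 */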
1469 
1470 /*
1471  * Search an array for a value.
1472  */
1473 
1474 static int search(__s32 *array, __s32 value, unsigned n)
1475 {
1476 	while (n--) {
1477 		if (*array++ == value)
1478 			return 0;
1479 	}
1480 	return -1;
1481 }
1482 
1483 /**
1484  * hid_match_report - check if driver's raw_event should be called
1485  *
1486  * @hid: hid device
1487  * @report: hid report to match against
1488  *
1489  * compare hid->driver->report_table->report_type to report->type
1490  */
1491 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1492 {
1493 	const struct hid_report_id *id = hid->driver->report_table;
1494 
1495 	if (!id) /* NULL means all */
1496 		return 1;
1497 
1498 	for (; id->report_type != HID_TERMINATOR; id++)
1499 		if (id->report_type == HID_ANY_ID ||
1500 				id->report_type == report->type)
1501 			return 1;
1502 	return 0;
1503 }
1504 
1505 /**
1506  * hid_match_usage - check if driver's event should be called
1507  *
1508  * @hid: hid device
1509  * @usage: usage to match against
1510  *
1511  * compare hid->driver->usage_table->usage_{type,code} to
1512  * usage->usage_{type,code}
1513  */
1514 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1515 {
1516 	const struct hid_usage_id *id = hid->driver->usage_table;
1517 
1518 	if (!id) /* NULL means all */
1519 		return 1;
1520 
1521 	for (; id->usage_type != HID_ANY_ID - 1; id++)
1522 		if ((id->usage_hid == HID_ANY_ID ||
1523 				id->usage_hid == usage->hid) &&
1524 				(id->usage_type == HID_ANY_ID ||
1525 				id->usage_type == usage->type) &&
1526 				(id->usage_code == HID_ANY_ID ||
1527 				 id->usage_code == usage->code))
1528 			return 1;
1529 	return 0;
1530 }
1531 
1532 static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1533 		struct hid_usage *usage, __s32 value, int interrupt)
1534 {
1535 	struct hid_driver *hdrv = hid->driver;
1536 	int ret;
1537 
1538 	if (!list_empty(&hid->debug_list))
1539 		hid_dump_input(hid, usage, value);
1540 
1541 	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1542 		ret = hdrv->event(hid, field, usage, value);
1543 		if (ret != 0) {
1544 			if (ret < 0)
1545 				hid_err(hid, "%s's event failed with %d\n",
1546 						hdrv->name, ret);
1547 			return;
1548 		}
1549 	}
1550 
1551 	if (hid->claimed & HID_CLAIMED_INPUT)
1552 		hidinput_hid_event(hid, field, usage, value);
1553 	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1554 		hid->hiddev_hid_event(hid, field, usage, value);
1555 }
1556 
1557 /*
1558  * Checks if the given value is valid within this field
1559  */
1560 static inline int hid_array_value_is_valid(struct hid_field *field,
1561 					   __s32 value)
1562 {
1563 	__s32 min = field->logical_minimum;
1564 
1565 	/*
1566 	 * Value needs to be between logical min and max, and
1567 	 * (value - min) is used as an index in the usage array.
1568 	 * This array is of size field->maxusage
1569 	 */
1570 	return value >= min &&
1571 	       value <= field->logical_maximum &&
1572 	       value - min < field->maxusage;
1573 }
1574 
1575 /*
1576  * Fetch the field from the data. The field content is stored for next
1577  * report processing (we do differential reporting to the layer).
1578  */
1579 static void hid_input_fetch_field(struct hid_device *hid,
1580 				  struct hid_field *field,
1581 				  __u8 *data)
1582 {
1583 	unsigned n;
1584 	unsigned count = field->report_count;
1585 	unsigned offset = field->report_offset;
1586 	unsigned size = field->report_size;
1587 	__s32 min = field->logical_minimum;
1588 	__s32 *value;
1589 
1590 	value = field->new_value;
1591 	memset(value, 0, count * sizeof(__s32));
1592 	field->ignored = false;
1593 
1594 	for (n = 0; n < count; n++) {
1595 
1596 		value[n] = min < 0 ?
1597 			snto32(hid_field_extract(hid, data, offset + n * size,
1598 			       size), size) :
1599 			hid_field_extract(hid, data, offset + n * size, size);
1600 
1601 		/* Ignore report if ErrorRollOver */
1602 		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1603 		    hid_array_value_is_valid(field, value[n]) &&
1604 		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1605 			field->ignored = true;
1606 			return;
1607 		}
1608 	}
1609 }
1610 
1611 /*
1612  * Process a received variable field.
1613  */
1614 
1615 static void hid_input_var_field(struct hid_device *hid,
1616 				struct hid_field *field,
1617 				int interrupt)
1618 {
1619 	unsigned int count = field->report_count;
1620 	__s32 *value = field->new_value;
1621 	unsigned int n;
1622 
1623 	for (n = 0; n < count; n++)
1624 		hid_process_event(hid,
1625 				  field,
1626 				  &field->usage[n],
1627 				  value[n],
1628 				  interrupt);
1629 
1630 	memcpy(field->value, value, count * sizeof(__s32));
1631 }
1632 
1633 /*
1634  * Process a received array field. The field content is stored for
1635  * next report processing (we do differential reporting to the layer).
1636  */
1637 
1638 static void hid_input_array_field(struct hid_device *hid,
1639 				  struct hid_field *field,
1640 				  int interrupt)
1641 {
1642 	unsigned int n;
1643 	unsigned int count = field->report_count;
1644 	__s32 min = field->logical_minimum;
1645 	__s32 *value;
1646 
1647 	value = field->new_value;
1648 
1649 	/* ErrorRollOver */
1650 	if (field->ignored)
1651 		return;
1652 
1653 	for (n = 0; n < count; n++) {
1654 		if (hid_array_value_is_valid(field, field->value[n]) &&
1655 		    search(value, field->value[n], count))
1656 			hid_process_event(hid,
1657 					  field,
1658 					  &field->usage[field->value[n] - min],
1659 					  0,
1660 					  interrupt);
1661 
1662 		if (hid_array_value_is_valid(field, value[n]) &&
1663 		    search(field->value, value[n], count))
1664 			hid_process_event(hid,
1665 					  field,
1666 					  &field->usage[value[n] - min],
1667 					  1,
1668 					  interrupt);
1669 	}
1670 
1671 	memcpy(field->value, value, count * sizeof(__s32));
1672 }
1673 
1674 /*
1675  * Analyse a received report, and fetch the data from it. The field
1676  * content is stored for next report processing (we do differential
1677  * reporting to the layer).
1678  */
1679 static void hid_process_report(struct hid_device *hid,
1680 			       struct hid_report *report,
1681 			       __u8 *data,
1682 			       int interrupt)
1683 {
1684 	unsigned int a;
1685 	struct hid_field_entry *entry;
1686 	struct hid_field *field;
1687 
1688 	/* first retrieve all incoming values in data */
1689 	for (a = 0; a < report->maxfield; a++)
1690 		hid_input_fetch_field(hid, report->field[a], data);
1691 
1692 	if (!list_empty(&report->field_entry_list)) {
1693 		/* INPUT_REPORT, we have a priority list of fields */
1694 		list_for_each_entry(entry,
1695 				    &report->field_entry_list,
1696 				    list) {
1697 			field = entry->field;
1698 
1699 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1700 				hid_process_event(hid,
1701 						  field,
1702 						  &field->usage[entry->index],
1703 						  field->new_value[entry->index],
1704 						  interrupt);
1705 			else
1706 				hid_input_array_field(hid, field, interrupt);
1707 		}
1708 
1709 		/* we need to do the memcpy at the end for var items */
1710 		for (a = 0; a < report->maxfield; a++) {
1711 			field = report->field[a];
1712 
1713 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1714 				memcpy(field->value, field->new_value,
1715 				       field->report_count * sizeof(__s32));
1716 		}
1717 	} else {
1718 		/* FEATURE_REPORT, regular processing */
1719 		for (a = 0; a < report->maxfield; a++) {
1720 			field = report->field[a];
1721 
1722 			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1723 				hid_input_var_field(hid, field, interrupt);
1724 			else
1725 				hid_input_array_field(hid, field, interrupt);
1726 		}
1727 	}
1728 }
1729 
1730 /*
1731  * Insert a given usage_index in a field in the list
1732  * of processed usages in the report.
1733  *
1734  * The elements of lower priority score are processed
1735  * first.
1736  */
1737 static void __hid_insert_field_entry(struct hid_device *hid,
1738 				     struct hid_report *report,
1739 				     struct hid_field_entry *entry,
1740 				     struct hid_field *field,
1741 				     unsigned int usage_index)
1742 {
1743 	struct hid_field_entry *next;
1744 
1745 	entry->field = field;
1746 	entry->index = usage_index;
1747 	entry->priority = field->usages_priorities[usage_index];
1748 
1749 	/* insert the element at the correct position */
1750 	list_for_each_entry(next,
1751 			    &report->field_entry_list,
1752 			    list) {
1753 		/*
1754 		 * the priority of our element is strictly higher
1755 		 * than the next one, insert it before
1756 		 */
1757 		if (entry->priority > next->priority) {
1758 			list_add_tail(&entry->list, &next->list);
1759 			return;
1760 		}
1761 	}
1762 
1763 	/* lowest priority score: insert at the end */
1764 	list_add_tail(&entry->list, &report->field_entry_list);
1765 }
1766 
hid_report_process_ordering(struct hid_device * hid,struct hid_report * report)1767 static void hid_report_process_ordering(struct hid_device *hid,
1768 					struct hid_report *report)
1769 {
1770 	struct hid_field *field;
1771 	struct hid_field_entry *entries;
1772 	unsigned int a, u, usages;
1773 	unsigned int count = 0;
1774 
1775 	/* count the number of individual fields in the report */
1776 	for (a = 0; a < report->maxfield; a++) {
1777 		field = report->field[a];
1778 
1779 		if (field->flags & HID_MAIN_ITEM_VARIABLE)
1780 			count += field->report_count;
1781 		else
1782 			count++;
1783 	}
1784 
1785 	/* allocate the memory to process the fields */
1786 	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1787 	if (!entries)
1788 		return;
1789 
1790 	report->field_entries = entries;
1791 
1792 	/*
1793 	 * walk through all fields in the report and
1794 	 * store them by priority order in report->field_entry_list
1795 	 *
1796 	 * - Var elements are individualized (field + usage_index)
1797 	 * - Arrays are taken as one, we cannot choose an order for them
1798 	 */
1799 	usages = 0;
1800 	for (a = 0; a < report->maxfield; a++) {
1801 		field = report->field[a];
1802 
1803 		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1804 			for (u = 0; u < field->report_count; u++) {
1805 				__hid_insert_field_entry(hid, report,
1806 							 &entries[usages],
1807 							 field, u);
1808 				usages++;
1809 			}
1810 		} else {
1811 			__hid_insert_field_entry(hid, report, &entries[usages],
1812 						 field, 0);
1813 			usages++;
1814 		}
1815 	}
1816 }
1817 
hid_process_ordering(struct hid_device * hid)1818 static void hid_process_ordering(struct hid_device *hid)
1819 {
1820 	struct hid_report *report;
1821 	struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1822 
1823 	list_for_each_entry(report, &report_enum->report_list, list)
1824 		hid_report_process_ordering(hid, report);
1825 }
1826 
1827 /*
1828  * Output the field into the report.
1829  */
1830 
hid_output_field(const struct hid_device * hid,struct hid_field * field,__u8 * data)1831 static void hid_output_field(const struct hid_device *hid,
1832 			     struct hid_field *field, __u8 *data)
1833 {
1834 	unsigned count = field->report_count;
1835 	unsigned offset = field->report_offset;
1836 	unsigned size = field->report_size;
1837 	unsigned n;
1838 
1839 	for (n = 0; n < count; n++) {
1840 		if (field->logical_minimum < 0)	/* signed values */
1841 			implement(hid, data, offset + n * size, size,
1842 				  s32ton(field->value[n], size));
1843 		else				/* unsigned values */
1844 			implement(hid, data, offset + n * size, size,
1845 				  field->value[n]);
1846 	}
1847 }
1848 
1849 /*
1850  * Compute the size of a report.
1851  */
hid_compute_report_size(struct hid_report * report)1852 static size_t hid_compute_report_size(struct hid_report *report)
1853 {
1854 	if (report->size)
1855 		return ((report->size - 1) >> 3) + 1;
1856 
1857 	return 0;
1858 }
1859 
1860 /*
1861  * Create a report. 'data' has to be allocated using
1862  * hid_alloc_report_buf() so that it has proper size.
1863  */
1864 
hid_output_report(struct hid_report * report,__u8 * data)1865 void hid_output_report(struct hid_report *report, __u8 *data)
1866 {
1867 	unsigned n;
1868 
1869 	if (report->id > 0)
1870 		*data++ = report->id;
1871 
1872 	memset(data, 0, hid_compute_report_size(report));
1873 	for (n = 0; n < report->maxfield; n++)
1874 		hid_output_field(report->device, report->field[n], data);
1875 }
1876 EXPORT_SYMBOL_GPL(hid_output_report);
1877 
1878 /*
1879  * Allocator for buffer that is going to be passed to hid_output_report()
1880  */
hid_alloc_report_buf(struct hid_report * report,gfp_t flags)1881 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1882 {
1883 	/*
1884 	 * 7 extra bytes are necessary so that implement(), which works on
1885 	 * 8 byte chunks, never accesses memory past the end of the buffer.
1886 	 * 1 extra byte is needed for the report ID when it is 0 (not used),
1887 	 * so that byte can be reserved in the first position of the buffer
1888 	 * when sending it to .raw_request()
1889 	 */
1890 
1891 	u32 len = hid_report_len(report) + 7 + (report->id == 0);
1892 
1893 	return kzalloc(len, flags);
1894 }
1895 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
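
/*
 * Illustrative sketch (not part of this file): the usual pairing of
 * hid_alloc_report_buf() with hid_output_report(), serializing the current
 * field values and pushing them out through hid_hw_output_report(). The
 * function name is hypothetical and error handling is kept minimal.
 */
static int __maybe_unused hid_example_send_output(struct hid_device *hdev,
						  struct hid_report *report)
{
	u8 *buf;
	int ret;

	buf = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* writes the report ID (when non-zero) followed by all field values */
	hid_output_report(report, buf);

	ret = hid_hw_output_report(hdev, buf, hid_report_len(report));

	kfree(buf);
	return ret;
}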
1896 
1897 /*
1898  * Set a field value. The report this field belongs to has to be
1899  * created and transferred to the device, to set this value in the
1900  * device.
1901  */
1902 
hid_set_field(struct hid_field * field,unsigned offset,__s32 value)1903 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1904 {
1905 	unsigned size;
1906 
1907 	if (!field)
1908 		return -1;
1909 
1910 	size = field->report_size;
1911 
1912 	hid_dump_input(field->report->device, field->usage + offset, value);
1913 
1914 	if (offset >= field->report_count) {
1915 		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1916 				offset, field->report_count);
1917 		return -1;
1918 	}
1919 	if (field->logical_minimum < 0) {
1920 		if (value != snto32(s32ton(value, size), size)) {
1921 			hid_err(field->report->device, "value %d is out of range\n", value);
1922 			return -1;
1923 		}
1924 	}
1925 	field->value[offset] = value;
1926 	return 0;
1927 }
1928 EXPORT_SYMBOL_GPL(hid_set_field);
1929 
hid_find_field(struct hid_device * hdev,unsigned int report_type,unsigned int application,unsigned int usage)1930 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1931 				 unsigned int application, unsigned int usage)
1932 {
1933 	struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1934 	struct hid_report *report;
1935 	int i, j;
1936 
1937 	list_for_each_entry(report, report_list, list) {
1938 		if (report->application != application)
1939 			continue;
1940 
1941 		for (i = 0; i < report->maxfield; i++) {
1942 			struct hid_field *field = report->field[i];
1943 
1944 			for (j = 0; j < field->maxusage; j++) {
1945 				if (field->usage[j].hid == usage)
1946 					return field;
1947 			}
1948 		}
1949 	}
1950 
1951 	return NULL;
1952 }
1953 EXPORT_SYMBOL_GPL(hid_find_field);
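
/*
 * Illustrative sketch (not part of this file): combining hid_find_field()
 * and hid_set_field() to update a usage and transfer the report, as the
 * hid_set_field() comment above describes. The function name and the use
 * of an output report are just assumptions for the example.
 */
static void __maybe_unused hid_example_write_usage(struct hid_device *hdev,
						   unsigned int application,
						   unsigned int usage,
						   __s32 value)
{
	struct hid_field *field;

	field = hid_find_field(hdev, HID_OUTPUT_REPORT, application, usage);
	if (!field)
		return;

	if (hid_set_field(field, 0, value))
		return;

	/* the new value only reaches the device once the report is sent */
	hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT);
}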
1954 
hid_get_report(struct hid_report_enum * report_enum,const u8 * data)1955 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1956 		const u8 *data)
1957 {
1958 	struct hid_report *report;
1959 	unsigned int n = 0;	/* Normally report number is 0 */
1960 
1961 	/* Device uses numbered reports, data[0] is report number */
1962 	if (report_enum->numbered)
1963 		n = *data;
1964 
1965 	report = report_enum->report_id_hash[n];
1966 	if (report == NULL)
1967 		dbg_hid("undefined report_id %u received\n", n);
1968 
1969 	return report;
1970 }
1971 
1972 /*
1973  * Implement a generic .request() callback, using .raw_request()
1974  * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1975  */
__hid_request(struct hid_device * hid,struct hid_report * report,enum hid_class_request reqtype)1976 int __hid_request(struct hid_device *hid, struct hid_report *report,
1977 		enum hid_class_request reqtype)
1978 {
1979 	char *buf, *data_buf;
1980 	int ret;
1981 	u32 len;
1982 
1983 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
1984 	if (!buf)
1985 		return -ENOMEM;
1986 
1987 	data_buf = buf;
1988 	len = hid_report_len(report);
1989 
1990 	if (report->id == 0) {
1991 		/* reserve the first byte for the report ID */
1992 		data_buf++;
1993 		len++;
1994 	}
1995 
1996 	if (reqtype == HID_REQ_SET_REPORT)
1997 		hid_output_report(report, data_buf);
1998 
1999 	ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
2000 	if (ret < 0) {
2001 		dbg_hid("unable to complete request: %d\n", ret);
2002 		goto out;
2003 	}
2004 
2005 	if (reqtype == HID_REQ_GET_REPORT)
2006 		hid_input_report(hid, report->type, buf, ret, 0);
2007 
2008 	ret = 0;
2009 
2010 out:
2011 	kfree(buf);
2012 	return ret;
2013 }
2014 EXPORT_SYMBOL_GPL(__hid_request);
2015 
hid_report_raw_event(struct hid_device * hid,enum hid_report_type type,u8 * data,u32 size,int interrupt)2016 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2017 			 int interrupt)
2018 {
2019 	struct hid_report_enum *report_enum = hid->report_enum + type;
2020 	struct hid_report *report;
2021 	struct hid_driver *hdrv;
2022 	int max_buffer_size = HID_MAX_BUFFER_SIZE;
2023 	u32 rsize, csize = size;
2024 	u8 *cdata = data;
2025 	int ret = 0;
2026 
2027 	report = hid_get_report(report_enum, data);
2028 	if (!report)
2029 		goto out;
2030 
2031 	if (report_enum->numbered) {
2032 		cdata++;
2033 		csize--;
2034 	}
2035 
2036 	rsize = hid_compute_report_size(report);
2037 
2038 	if (hid->ll_driver->max_buffer_size)
2039 		max_buffer_size = hid->ll_driver->max_buffer_size;
2040 
2041 	if (report_enum->numbered && rsize >= max_buffer_size)
2042 		rsize = max_buffer_size - 1;
2043 	else if (rsize > max_buffer_size)
2044 		rsize = max_buffer_size;
2045 
2046 	if (csize < rsize) {
2047 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2048 				csize, rsize);
2049 		memset(cdata + csize, 0, rsize - csize);
2050 	}
2051 
2052 	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2053 		hid->hiddev_report_event(hid, report);
2054 	if (hid->claimed & HID_CLAIMED_HIDRAW) {
2055 		ret = hidraw_report_event(hid, data, size);
2056 		if (ret)
2057 			goto out;
2058 	}
2059 
2060 	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2061 		hid_process_report(hid, report, cdata, interrupt);
2062 		hdrv = hid->driver;
2063 		if (hdrv && hdrv->report)
2064 			hdrv->report(hid, report);
2065 	}
2066 
2067 	if (hid->claimed & HID_CLAIMED_INPUT)
2068 		hidinput_report_event(hid, report);
2069 out:
2070 	return ret;
2071 }
2072 EXPORT_SYMBOL_GPL(hid_report_raw_event);
2073 
2074 
__hid_input_report(struct hid_device * hid,enum hid_report_type type,u8 * data,u32 size,int interrupt,u64 source,bool from_bpf,bool lock_already_taken)2075 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2076 			      u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2077 			      bool lock_already_taken)
2078 {
2079 	struct hid_report_enum *report_enum;
2080 	struct hid_driver *hdrv;
2081 	struct hid_report *report;
2082 	int ret = 0;
2083 
2084 	if (!hid)
2085 		return -ENODEV;
2086 
2087 	ret = down_trylock(&hid->driver_input_lock);
2088 	if (lock_already_taken && !ret) {
2089 		up(&hid->driver_input_lock);
2090 		return -EINVAL;
2091 	} else if (!lock_already_taken && ret) {
2092 		return -EBUSY;
2093 	}
2094 
2095 	if (!hid->driver) {
2096 		ret = -ENODEV;
2097 		goto unlock;
2098 	}
2099 	report_enum = hid->report_enum + type;
2100 	hdrv = hid->driver;
2101 
2102 	data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2103 	if (IS_ERR(data)) {
2104 		ret = PTR_ERR(data);
2105 		goto unlock;
2106 	}
2107 
2108 	if (!size) {
2109 		dbg_hid("empty report\n");
2110 		ret = -1;
2111 		goto unlock;
2112 	}
2113 
2114 	/* Avoid unnecessary overhead if debugfs is disabled */
2115 	if (!list_empty(&hid->debug_list))
2116 		hid_dump_report(hid, type, data, size);
2117 
2118 	report = hid_get_report(report_enum, data);
2119 
2120 	if (!report) {
2121 		ret = -1;
2122 		goto unlock;
2123 	}
2124 
2125 	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2126 		ret = hdrv->raw_event(hid, report, data, size);
2127 		if (ret < 0)
2128 			goto unlock;
2129 	}
2130 
2131 	ret = hid_report_raw_event(hid, type, data, size, interrupt);
2132 
2133 unlock:
2134 	if (!lock_already_taken)
2135 		up(&hid->driver_input_lock);
2136 	return ret;
2137 }
2138 
2139 /**
2140  * hid_input_report - report data from lower layer (usb, bt...)
2141  *
2142  * @hid: hid device
2143  * @type: HID report type (HID_*_REPORT)
2144  * @data: report contents
2145  * @size: size of data parameter
2146  * @interrupt: distinguish between interrupt and control transfers
2147  *
2148  * This is the data entry point for lower layers.
2149  */
hid_input_report(struct hid_device * hid,enum hid_report_type type,u8 * data,u32 size,int interrupt)2150 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2151 		     int interrupt)
2152 {
2153 	return __hid_input_report(hid, type, data, size, interrupt, 0,
2154 				  false, /* from_bpf */
2155 				  false /* lock_already_taken */);
2156 }
2157 EXPORT_SYMBOL_GPL(hid_input_report);
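
/*
 * Illustrative sketch (not part of this file): how a transport ("ll") driver
 * typically hands received data to the core from its completion handler.
 * The function name is hypothetical.
 */
static void __maybe_unused hid_example_transport_complete(struct hid_device *hdev,
							   u8 *data, u32 size)
{
	/* data arrived on the interrupt channel, hence interrupt == 1 */
	if (hid_input_report(hdev, HID_INPUT_REPORT, data, size, 1) < 0)
		dbg_hid("report dropped\n");
}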
2158 
hid_match_one_id(const struct hid_device * hdev,const struct hid_device_id * id)2159 bool hid_match_one_id(const struct hid_device *hdev,
2160 		      const struct hid_device_id *id)
2161 {
2162 	return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2163 		(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2164 		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2165 		(id->product == HID_ANY_ID || id->product == hdev->product);
2166 }
2167 
hid_match_id(const struct hid_device * hdev,const struct hid_device_id * id)2168 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2169 		const struct hid_device_id *id)
2170 {
2171 	for (; id->bus; id++)
2172 		if (hid_match_one_id(hdev, id))
2173 			return id;
2174 
2175 	return NULL;
2176 }
2177 EXPORT_SYMBOL_GPL(hid_match_id);
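
/*
 * Illustrative sketch (not part of this file): a driver-style ID table
 * checked with hid_match_id(). The vendor/product values are placeholders.
 */
static const struct hid_device_id hid_example_ids[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* hypothetical device */
	{ }
};

static bool __maybe_unused hid_example_is_supported(struct hid_device *hdev)
{
	return hid_match_id(hdev, hid_example_ids) != NULL;
}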
2178 
2179 static const struct hid_device_id hid_hiddev_list[] = {
2180 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2181 	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2182 	{ }
2183 };
2184 
hid_hiddev(struct hid_device * hdev)2185 static bool hid_hiddev(struct hid_device *hdev)
2186 {
2187 	return !!hid_match_id(hdev, hid_hiddev_list);
2188 }
2189 
2190 
2191 static ssize_t
report_descriptor_read(struct file * filp,struct kobject * kobj,const struct bin_attribute * attr,char * buf,loff_t off,size_t count)2192 report_descriptor_read(struct file *filp, struct kobject *kobj,
2193 		       const struct bin_attribute *attr,
2194 		       char *buf, loff_t off, size_t count)
2195 {
2196 	struct device *dev = kobj_to_dev(kobj);
2197 	struct hid_device *hdev = to_hid_device(dev);
2198 
2199 	if (off >= hdev->rsize)
2200 		return 0;
2201 
2202 	if (off + count > hdev->rsize)
2203 		count = hdev->rsize - off;
2204 
2205 	memcpy(buf, hdev->rdesc + off, count);
2206 
2207 	return count;
2208 }
2209 
2210 static ssize_t
country_show(struct device * dev,struct device_attribute * attr,char * buf)2211 country_show(struct device *dev, struct device_attribute *attr,
2212 	     char *buf)
2213 {
2214 	struct hid_device *hdev = to_hid_device(dev);
2215 
2216 	return sprintf(buf, "%02x\n", hdev->country & 0xff);
2217 }
2218 
2219 static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);
2220 
2221 static const DEVICE_ATTR_RO(country);
2222 
hid_connect(struct hid_device * hdev,unsigned int connect_mask)2223 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2224 {
2225 	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2226 		"Joystick", "Gamepad", "Keyboard", "Keypad",
2227 		"Multi-Axis Controller"
2228 	};
2229 	const char *type, *bus;
2230 	char buf[64] = "";
2231 	unsigned int i;
2232 	int len;
2233 	int ret;
2234 
2235 	ret = hid_bpf_connect_device(hdev);
2236 	if (ret)
2237 		return ret;
2238 
2239 	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2240 		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2241 	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2242 		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2243 	if (hdev->bus != BUS_USB)
2244 		connect_mask &= ~HID_CONNECT_HIDDEV;
2245 	if (hid_hiddev(hdev))
2246 		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2247 
2248 	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2249 				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2250 		hdev->claimed |= HID_CLAIMED_INPUT;
2251 
2252 	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2253 			!hdev->hiddev_connect(hdev,
2254 				connect_mask & HID_CONNECT_HIDDEV_FORCE))
2255 		hdev->claimed |= HID_CLAIMED_HIDDEV;
2256 	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2257 		hdev->claimed |= HID_CLAIMED_HIDRAW;
2258 
2259 	if (connect_mask & HID_CONNECT_DRIVER)
2260 		hdev->claimed |= HID_CLAIMED_DRIVER;
2261 
2262 	/* Drivers with the ->raw_event callback set are not required to connect
2263 	 * to any other listener. */
2264 	if (!hdev->claimed && !hdev->driver->raw_event) {
2265 		hid_err(hdev, "device has no listeners, quitting\n");
2266 		return -ENODEV;
2267 	}
2268 
2269 	hid_process_ordering(hdev);
2270 
2271 	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2272 			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2273 		hdev->ff_init(hdev);
2274 
2275 	len = 0;
2276 	if (hdev->claimed & HID_CLAIMED_INPUT)
2277 		len += sprintf(buf + len, "input");
2278 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2279 		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2280 				((struct hiddev *)hdev->hiddev)->minor);
2281 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2282 		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2283 				((struct hidraw *)hdev->hidraw)->minor);
2284 
2285 	type = "Device";
2286 	for (i = 0; i < hdev->maxcollection; i++) {
2287 		struct hid_collection *col = &hdev->collection[i];
2288 		if (col->type == HID_COLLECTION_APPLICATION &&
2289 		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2290 		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2291 			type = types[col->usage & 0xffff];
2292 			break;
2293 		}
2294 	}
2295 
2296 	switch (hdev->bus) {
2297 	case BUS_USB:
2298 		bus = "USB";
2299 		break;
2300 	case BUS_BLUETOOTH:
2301 		bus = "BLUETOOTH";
2302 		break;
2303 	case BUS_I2C:
2304 		bus = "I2C";
2305 		break;
2306 	case BUS_VIRTUAL:
2307 		bus = "VIRTUAL";
2308 		break;
2309 	case BUS_INTEL_ISHTP:
2310 	case BUS_AMD_SFH:
2311 		bus = "SENSOR HUB";
2312 		break;
2313 	default:
2314 		bus = "<UNKNOWN>";
2315 	}
2316 
2317 	ret = device_create_file(&hdev->dev, &dev_attr_country);
2318 	if (ret)
2319 		hid_warn(hdev,
2320 			 "can't create sysfs country code attribute err: %d\n", ret);
2321 
2322 	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2323 		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2324 		 type, hdev->name, hdev->phys);
2325 
2326 	return 0;
2327 }
2328 EXPORT_SYMBOL_GPL(hid_connect);
2329 
hid_disconnect(struct hid_device * hdev)2330 void hid_disconnect(struct hid_device *hdev)
2331 {
2332 	device_remove_file(&hdev->dev, &dev_attr_country);
2333 	if (hdev->claimed & HID_CLAIMED_INPUT)
2334 		hidinput_disconnect(hdev);
2335 	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2336 		hdev->hiddev_disconnect(hdev);
2337 	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2338 		hidraw_disconnect(hdev);
2339 	hdev->claimed = 0;
2340 
2341 	hid_bpf_disconnect_device(hdev);
2342 }
2343 EXPORT_SYMBOL_GPL(hid_disconnect);
2344 
2345 /**
2346  * hid_hw_start - start underlying HW
2347  * @hdev: hid device
2348  * @connect_mask: which outputs to connect, see HID_CONNECT_*
2349  *
2350  * Call this in the probe function *after* hid_parse. This will set up HW
2351  * buffers and start the device (if not deferred to device open).
2352  * hid_hw_stop must be called if this was successful.
2353  */
hid_hw_start(struct hid_device * hdev,unsigned int connect_mask)2354 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2355 {
2356 	int error;
2357 
2358 	error = hdev->ll_driver->start(hdev);
2359 	if (error)
2360 		return error;
2361 
2362 	if (connect_mask) {
2363 		error = hid_connect(hdev, connect_mask);
2364 		if (error) {
2365 			hdev->ll_driver->stop(hdev);
2366 			return error;
2367 		}
2368 	}
2369 
2370 	return 0;
2371 }
2372 EXPORT_SYMBOL_GPL(hid_hw_start);
2373 
2374 /**
2375  * hid_hw_stop - stop underlying HW
2376  * @hdev: hid device
2377  *
2378  * This is usually called from the remove function, or from probe when
2379  * something failed and hid_hw_start was already called.
2380  */
hid_hw_stop(struct hid_device * hdev)2381 void hid_hw_stop(struct hid_device *hdev)
2382 {
2383 	hid_disconnect(hdev);
2384 	hdev->ll_driver->stop(hdev);
2385 }
2386 EXPORT_SYMBOL_GPL(hid_hw_stop);
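
/*
 * Illustrative sketch (not part of this file): the probe/remove pattern the
 * two comments above describe, i.e. hid_parse() followed by hid_hw_start(),
 * undone by hid_hw_stop(). The names are hypothetical; this mirrors what the
 * default probe in __hid_device_probe() does.
 */
static int __maybe_unused hid_example_probe(struct hid_device *hdev,
					    const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static void __maybe_unused hid_example_remove(struct hid_device *hdev)
{
	hid_hw_stop(hdev);
}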
2387 
2388 /**
2389  * hid_hw_open - signal underlying HW to start delivering events
2390  * @hdev: hid device
2391  *
2392  * Tell underlying HW to start delivering events from the device.
2393  * This function should be called sometime after a successful call
2394  * to hid_hw_start().
2395  */
hid_hw_open(struct hid_device * hdev)2396 int hid_hw_open(struct hid_device *hdev)
2397 {
2398 	int ret;
2399 
2400 	ret = mutex_lock_killable(&hdev->ll_open_lock);
2401 	if (ret)
2402 		return ret;
2403 
2404 	if (!hdev->ll_open_count++) {
2405 		ret = hdev->ll_driver->open(hdev);
2406 		if (ret)
2407 			hdev->ll_open_count--;
2408 
2409 		if (hdev->driver->on_hid_hw_open)
2410 			hdev->driver->on_hid_hw_open(hdev);
2411 	}
2412 
2413 	mutex_unlock(&hdev->ll_open_lock);
2414 	return ret;
2415 }
2416 EXPORT_SYMBOL_GPL(hid_hw_open);
2417 
2418 /**
2419  * hid_hw_close - signal underlying HW to stop delivering events
2420  *
2421  * @hdev: hid device
2422  *
2423  * This function indicates that we are not interested in the events
2424  * from this device anymore. Delivery of events may or may not stop,
2425  * depending on the number of users still outstanding.
2426  */
hid_hw_close(struct hid_device * hdev)2427 void hid_hw_close(struct hid_device *hdev)
2428 {
2429 	mutex_lock(&hdev->ll_open_lock);
2430 	if (!--hdev->ll_open_count) {
2431 		hdev->ll_driver->close(hdev);
2432 
2433 		if (hdev->driver->on_hid_hw_close)
2434 			hdev->driver->on_hid_hw_close(hdev);
2435 	}
2436 	mutex_unlock(&hdev->ll_open_lock);
2437 }
2438 EXPORT_SYMBOL_GPL(hid_hw_close);
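
/*
 * Illustrative sketch (not part of this file): hid_hw_open()/hid_hw_close()
 * are usually wired to an input device's open()/close() callbacks so the
 * transport only delivers events while there is a listener. Names are
 * hypothetical.
 */
static int __maybe_unused hid_example_input_open(struct input_dev *dev)
{
	struct hid_device *hdev = input_get_drvdata(dev);

	return hid_hw_open(hdev);
}

static void __maybe_unused hid_example_input_close(struct input_dev *dev)
{
	struct hid_device *hdev = input_get_drvdata(dev);

	hid_hw_close(hdev);
}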
2439 
2440 /**
2441  * hid_hw_request - send report request to device
2442  *
2443  * @hdev: hid device
2444  * @report: report to send
2445  * @reqtype: hid request type
2446  */
hid_hw_request(struct hid_device * hdev,struct hid_report * report,enum hid_class_request reqtype)2447 void hid_hw_request(struct hid_device *hdev,
2448 		    struct hid_report *report, enum hid_class_request reqtype)
2449 {
2450 	if (hdev->ll_driver->request)
2451 		return hdev->ll_driver->request(hdev, report, reqtype);
2452 
2453 	__hid_request(hdev, report, reqtype);
2454 }
2455 EXPORT_SYMBOL_GPL(hid_hw_request);
2456 
__hid_hw_raw_request(struct hid_device * hdev,unsigned char reportnum,__u8 * buf,size_t len,enum hid_report_type rtype,enum hid_class_request reqtype,u64 source,bool from_bpf)2457 int __hid_hw_raw_request(struct hid_device *hdev,
2458 			 unsigned char reportnum, __u8 *buf,
2459 			 size_t len, enum hid_report_type rtype,
2460 			 enum hid_class_request reqtype,
2461 			 u64 source, bool from_bpf)
2462 {
2463 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2464 	int ret;
2465 
2466 	if (hdev->ll_driver->max_buffer_size)
2467 		max_buffer_size = hdev->ll_driver->max_buffer_size;
2468 
2469 	if (len < 1 || len > max_buffer_size || !buf)
2470 		return -EINVAL;
2471 
2472 	ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2473 					    reqtype, source, from_bpf);
2474 	if (ret)
2475 		return ret;
2476 
2477 	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2478 					    rtype, reqtype);
2479 }
2480 
2481 /**
2482  * hid_hw_raw_request - send report request to device
2483  *
2484  * @hdev: hid device
2485  * @reportnum: report ID
2486  * @buf: in/out data to transfer
2487  * @len: length of buf
2488  * @rtype: HID report type
2489  * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2490  *
2491  * Return: count of data transferred, negative if error
2492  *
2493  * Same behavior as hid_hw_request, but with raw buffers instead.
2494  */
hid_hw_raw_request(struct hid_device * hdev,unsigned char reportnum,__u8 * buf,size_t len,enum hid_report_type rtype,enum hid_class_request reqtype)2495 int hid_hw_raw_request(struct hid_device *hdev,
2496 		       unsigned char reportnum, __u8 *buf,
2497 		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2498 {
2499 	return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2500 }
2501 EXPORT_SYMBOL_GPL(hid_hw_raw_request);
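
/*
 * Illustrative sketch (not part of this file): fetching a feature report by
 * ID with hid_hw_raw_request(). Report ID and buffer length are placeholders
 * a real driver would take from its parsed report descriptor.
 */
static int __maybe_unused hid_example_get_feature(struct hid_device *hdev,
						  u8 report_id,
						  u8 *buf, size_t len)
{
	int ret;

	ret = hid_hw_raw_request(hdev, report_id, buf, len,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	if (ret < 0)
		hid_warn(hdev, "reading feature %u failed: %d\n",
			 report_id, ret);

	return ret;
}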
2502 
__hid_hw_output_report(struct hid_device * hdev,__u8 * buf,size_t len,u64 source,bool from_bpf)2503 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2504 			   bool from_bpf)
2505 {
2506 	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2507 	int ret;
2508 
2509 	if (hdev->ll_driver->max_buffer_size)
2510 		max_buffer_size = hdev->ll_driver->max_buffer_size;
2511 
2512 	if (len < 1 || len > max_buffer_size || !buf)
2513 		return -EINVAL;
2514 
2515 	ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2516 	if (ret)
2517 		return ret;
2518 
2519 	if (hdev->ll_driver->output_report)
2520 		return hdev->ll_driver->output_report(hdev, buf, len);
2521 
2522 	return -ENOSYS;
2523 }
2524 
2525 /**
2526  * hid_hw_output_report - send output report to device
2527  *
2528  * @hdev: hid device
2529  * @buf: raw data to transfer
2530  * @len: length of buf
2531  *
2532  * Return: count of data transferred, negative if error
2533  */
hid_hw_output_report(struct hid_device * hdev,__u8 * buf,size_t len)2534 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2535 {
2536 	return __hid_hw_output_report(hdev, buf, len, 0, false);
2537 }
2538 EXPORT_SYMBOL_GPL(hid_hw_output_report);
2539 
2540 #ifdef CONFIG_PM
hid_driver_suspend(struct hid_device * hdev,pm_message_t state)2541 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2542 {
2543 	if (hdev->driver && hdev->driver->suspend)
2544 		return hdev->driver->suspend(hdev, state);
2545 
2546 	return 0;
2547 }
2548 EXPORT_SYMBOL_GPL(hid_driver_suspend);
2549 
hid_driver_reset_resume(struct hid_device * hdev)2550 int hid_driver_reset_resume(struct hid_device *hdev)
2551 {
2552 	if (hdev->driver && hdev->driver->reset_resume)
2553 		return hdev->driver->reset_resume(hdev);
2554 
2555 	return 0;
2556 }
2557 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2558 
hid_driver_resume(struct hid_device * hdev)2559 int hid_driver_resume(struct hid_device *hdev)
2560 {
2561 	if (hdev->driver && hdev->driver->resume)
2562 		return hdev->driver->resume(hdev);
2563 
2564 	return 0;
2565 }
2566 EXPORT_SYMBOL_GPL(hid_driver_resume);
2567 #endif /* CONFIG_PM */
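
#ifdef CONFIG_PM
/*
 * Illustrative sketch (not part of this file): the driver-side callbacks
 * that hid_driver_suspend()/hid_driver_resume() dispatch to; a HID driver
 * opts in by filling .suspend/.resume in its struct hid_driver. Names and
 * bodies are hypothetical.
 */
static int __maybe_unused hid_example_suspend(struct hid_device *hdev,
					      pm_message_t state)
{
	/* e.g. stop polling or put the device into a low-power mode */
	return 0;
}

static int __maybe_unused hid_example_resume(struct hid_device *hdev)
{
	/* e.g. restore any state the device lost while suspended */
	return 0;
}
#endif /* CONFIG_PM */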
2568 
2569 struct hid_dynid {
2570 	struct list_head list;
2571 	struct hid_device_id id;
2572 };
2573 
2574 /**
2575  * new_id_store - add a new HID device ID to this driver and re-probe devices
2576  * @drv: target device driver
2577  * @buf: buffer for scanning device ID data
2578  * @count: input size
2579  *
2580  * Adds a new dynamic hid device ID to this driver,
2581  * and causes the driver to probe for all devices again.
2582  */
new_id_store(struct device_driver * drv,const char * buf,size_t count)2583 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2584 		size_t count)
2585 {
2586 	struct hid_driver *hdrv = to_hid_driver(drv);
2587 	struct hid_dynid *dynid;
2588 	__u32 bus, vendor, product;
2589 	unsigned long driver_data = 0;
2590 	int ret;
2591 
2592 	ret = sscanf(buf, "%x %x %x %lx",
2593 			&bus, &vendor, &product, &driver_data);
2594 	if (ret < 3)
2595 		return -EINVAL;
2596 
2597 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2598 	if (!dynid)
2599 		return -ENOMEM;
2600 
2601 	dynid->id.bus = bus;
2602 	dynid->id.group = HID_GROUP_ANY;
2603 	dynid->id.vendor = vendor;
2604 	dynid->id.product = product;
2605 	dynid->id.driver_data = driver_data;
2606 
2607 	spin_lock(&hdrv->dyn_lock);
2608 	list_add_tail(&dynid->list, &hdrv->dyn_list);
2609 	spin_unlock(&hdrv->dyn_lock);
2610 
2611 	ret = driver_attach(&hdrv->driver);
2612 
2613 	return ret ? : count;
2614 }
2615 static DRIVER_ATTR_WO(new_id);
2616 
2617 static struct attribute *hid_drv_attrs[] = {
2618 	&driver_attr_new_id.attr,
2619 	NULL,
2620 };
2621 ATTRIBUTE_GROUPS(hid_drv);
2622 
hid_free_dynids(struct hid_driver * hdrv)2623 static void hid_free_dynids(struct hid_driver *hdrv)
2624 {
2625 	struct hid_dynid *dynid, *n;
2626 
2627 	spin_lock(&hdrv->dyn_lock);
2628 	list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2629 		list_del(&dynid->list);
2630 		kfree(dynid);
2631 	}
2632 	spin_unlock(&hdrv->dyn_lock);
2633 }
2634 
hid_match_device(struct hid_device * hdev,struct hid_driver * hdrv)2635 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2636 					     struct hid_driver *hdrv)
2637 {
2638 	struct hid_dynid *dynid;
2639 
2640 	spin_lock(&hdrv->dyn_lock);
2641 	list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2642 		if (hid_match_one_id(hdev, &dynid->id)) {
2643 			spin_unlock(&hdrv->dyn_lock);
2644 			return &dynid->id;
2645 		}
2646 	}
2647 	spin_unlock(&hdrv->dyn_lock);
2648 
2649 	return hid_match_id(hdev, hdrv->id_table);
2650 }
2651 EXPORT_SYMBOL_GPL(hid_match_device);
2652 
hid_bus_match(struct device * dev,const struct device_driver * drv)2653 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2654 {
2655 	struct hid_driver *hdrv = to_hid_driver(drv);
2656 	struct hid_device *hdev = to_hid_device(dev);
2657 
2658 	return hid_match_device(hdev, hdrv) != NULL;
2659 }
2660 
2661 /**
2662  * hid_compare_device_paths - check if both devices share the same path
2663  * @hdev_a: hid device
2664  * @hdev_b: hid device
2665  * @separator: char to use as separator
2666  *
2667  * Check if two devices share the same path up to the last occurrence of
2668  * the separator char. Both paths must exist (i.e., zero-length paths
2669  * don't match).
2670  */
hid_compare_device_paths(struct hid_device * hdev_a,struct hid_device * hdev_b,char separator)2671 bool hid_compare_device_paths(struct hid_device *hdev_a,
2672 			      struct hid_device *hdev_b, char separator)
2673 {
2674 	int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2675 	int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2676 
2677 	if (n1 != n2 || n1 <= 0 || n2 <= 0)
2678 		return false;
2679 
2680 	return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2681 }
2682 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
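
/*
 * Illustrative sketch (not part of this file): hid_compare_device_paths()
 * with '/' as the separator answers "do these two interfaces belong to the
 * same physical USB device?", since such interfaces share the phys string
 * up to its last '/'. The wrapper name is hypothetical.
 */
static bool __maybe_unused hid_example_same_physical_device(struct hid_device *a,
							     struct hid_device *b)
{
	return hid_compare_device_paths(a, b, '/');
}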
2683 
hid_check_device_match(struct hid_device * hdev,struct hid_driver * hdrv,const struct hid_device_id ** id)2684 static bool hid_check_device_match(struct hid_device *hdev,
2685 				   struct hid_driver *hdrv,
2686 				   const struct hid_device_id **id)
2687 {
2688 	*id = hid_match_device(hdev, hdrv);
2689 	if (!*id)
2690 		return false;
2691 
2692 	if (hdrv->match)
2693 		return hdrv->match(hdev, hid_ignore_special_drivers);
2694 
2695 	/*
2696 	 * hid-generic implements .match(), so we must be dealing with a
2697 	 * different HID driver here, and can simply check if
2698 	 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2699 	 * are set or not.
2700 	 */
2701 	return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2702 }
2703 
__hid_device_probe(struct hid_device * hdev,struct hid_driver * hdrv)2704 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2705 {
2706 	const struct hid_device_id *id;
2707 	int ret;
2708 
2709 	if (!hdev->bpf_rsize) {
2710 		/* in case a bpf program gets detached, we need to free the old one */
2711 		hid_free_bpf_rdesc(hdev);
2712 
2713 		/* keep this around so we know we called it once */
2714 		hdev->bpf_rsize = hdev->dev_rsize;
2715 
2716 		/* call_hid_bpf_rdesc_fixup will always return a valid pointer */
2717 		hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
2718 							   &hdev->bpf_rsize);
2719 	}
2720 
2721 	if (!hid_check_device_match(hdev, hdrv, &id))
2722 		return -ENODEV;
2723 
2724 	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2725 	if (!hdev->devres_group_id)
2726 		return -ENOMEM;
2727 
2728 	/* reset the quirks that have been previously set */
2729 	hdev->quirks = hid_lookup_quirk(hdev);
2730 	hdev->driver = hdrv;
2731 
2732 	if (hdrv->probe) {
2733 		ret = hdrv->probe(hdev, id);
2734 	} else { /* default probe */
2735 		ret = hid_open_report(hdev);
2736 		if (!ret)
2737 			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2738 	}
2739 
2740 	/*
2741 	 * Note that we are not closing the devres group opened above so
2742 	 * even resources that were attached to the device after probe is
2743 	 * run are released when hid_device_remove() is executed. This is
2744 	 * needed as some drivers would allocate additional resources,
2745 	 * for example when updating firmware.
2746 	 */
2747 
2748 	if (ret) {
2749 		devres_release_group(&hdev->dev, hdev->devres_group_id);
2750 		hid_close_report(hdev);
2751 		hdev->driver = NULL;
2752 	}
2753 
2754 	return ret;
2755 }
2756 
hid_device_probe(struct device * dev)2757 static int hid_device_probe(struct device *dev)
2758 {
2759 	struct hid_device *hdev = to_hid_device(dev);
2760 	struct hid_driver *hdrv = to_hid_driver(dev->driver);
2761 	int ret = 0;
2762 
2763 	if (down_interruptible(&hdev->driver_input_lock))
2764 		return -EINTR;
2765 
2766 	hdev->io_started = false;
2767 	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2768 
2769 	if (!hdev->driver)
2770 		ret = __hid_device_probe(hdev, hdrv);
2771 
2772 	if (!hdev->io_started)
2773 		up(&hdev->driver_input_lock);
2774 
2775 	return ret;
2776 }
2777 
hid_device_remove(struct device * dev)2778 static void hid_device_remove(struct device *dev)
2779 {
2780 	struct hid_device *hdev = to_hid_device(dev);
2781 	struct hid_driver *hdrv;
2782 
2783 	down(&hdev->driver_input_lock);
2784 	hdev->io_started = false;
2785 
2786 	hdrv = hdev->driver;
2787 	if (hdrv) {
2788 		if (hdrv->remove)
2789 			hdrv->remove(hdev);
2790 		else /* default remove */
2791 			hid_hw_stop(hdev);
2792 
2793 		/* Release all devres resources allocated by the driver */
2794 		devres_release_group(&hdev->dev, hdev->devres_group_id);
2795 
2796 		hid_close_report(hdev);
2797 		hdev->driver = NULL;
2798 	}
2799 
2800 	if (!hdev->io_started)
2801 		up(&hdev->driver_input_lock);
2802 }
2803 
modalias_show(struct device * dev,struct device_attribute * a,char * buf)2804 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2805 			     char *buf)
2806 {
2807 	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2808 
2809 	return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2810 			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2811 }
2812 static DEVICE_ATTR_RO(modalias);
2813 
2814 static struct attribute *hid_dev_attrs[] = {
2815 	&dev_attr_modalias.attr,
2816 	NULL,
2817 };
2818 static const struct bin_attribute *hid_dev_bin_attrs[] = {
2819 	&bin_attr_report_descriptor,
2820 	NULL
2821 };
2822 static const struct attribute_group hid_dev_group = {
2823 	.attrs = hid_dev_attrs,
2824 	.bin_attrs_new = hid_dev_bin_attrs,
2825 };
2826 __ATTRIBUTE_GROUPS(hid_dev);
2827 
hid_uevent(const struct device * dev,struct kobj_uevent_env * env)2828 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2829 {
2830 	const struct hid_device *hdev = to_hid_device(dev);
2831 
2832 	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2833 			hdev->bus, hdev->vendor, hdev->product))
2834 		return -ENOMEM;
2835 
2836 	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2837 		return -ENOMEM;
2838 
2839 	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2840 		return -ENOMEM;
2841 
2842 	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2843 		return -ENOMEM;
2844 
2845 	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2846 			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
2847 		return -ENOMEM;
2848 
2849 	return 0;
2850 }
2851 
2852 const struct bus_type hid_bus_type = {
2853 	.name		= "hid",
2854 	.dev_groups	= hid_dev_groups,
2855 	.drv_groups	= hid_drv_groups,
2856 	.match		= hid_bus_match,
2857 	.probe		= hid_device_probe,
2858 	.remove		= hid_device_remove,
2859 	.uevent		= hid_uevent,
2860 };
2861 EXPORT_SYMBOL(hid_bus_type);
2862 
hid_add_device(struct hid_device * hdev)2863 int hid_add_device(struct hid_device *hdev)
2864 {
2865 	static atomic_t id = ATOMIC_INIT(0);
2866 	int ret;
2867 
2868 	if (WARN_ON(hdev->status & HID_STAT_ADDED))
2869 		return -EBUSY;
2870 
2871 	hdev->quirks = hid_lookup_quirk(hdev);
2872 
2873 	/* we need to kill them here, otherwise they will stay allocated
2874 	 * waiting for a matching driver */
2875 	if (hid_ignore(hdev))
2876 		return -ENODEV;
2877 
2878 	/*
2879 	 * Check for the mandatory transport channel.
2880 	 */
2881 	if (!hdev->ll_driver->raw_request) {
2882 		hid_err(hdev, "transport driver missing .raw_request()\n");
2883 		return -EINVAL;
2884 	}
2885 
2886 	/*
2887 	 * Read the device report descriptor once and use as template
2888 	 * Read the device report descriptor once and use it as a template
2889 	 */
2890 	ret = hdev->ll_driver->parse(hdev);
2891 	if (ret)
2892 		return ret;
2893 	if (!hdev->dev_rdesc)
2894 		return -ENODEV;
2895 
2896 	/*
2897 	 * Scan generic devices for group information
2898 	 */
2899 	if (hid_ignore_special_drivers) {
2900 		hdev->group = HID_GROUP_GENERIC;
2901 	} else if (!hdev->group &&
2902 		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2903 		ret = hid_scan_report(hdev);
2904 		if (ret)
2905 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2906 	}
2907 
2908 	hdev->id = atomic_inc_return(&id);
2909 
2910 	/* XXX hack, any other cleaner solution after the driver core
2911 	 * is converted to allow more than 20 bytes as the device name? */
2912 	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2913 		     hdev->vendor, hdev->product, hdev->id);
2914 
2915 	hid_debug_register(hdev, dev_name(&hdev->dev));
2916 	ret = device_add(&hdev->dev);
2917 	if (!ret)
2918 		hdev->status |= HID_STAT_ADDED;
2919 	else
2920 		hid_debug_unregister(hdev);
2921 
2922 	return ret;
2923 }
2924 EXPORT_SYMBOL_GPL(hid_add_device);
2925 
2926 /**
2927  * hid_allocate_device - allocate new hid device descriptor
2928  *
2929  * Allocate and initialize a hid device, so that hid_destroy_device() can
2930  * be used to free it.
2931  *
2932  * A new hid_device pointer is returned on success, otherwise an
2933  * ERR_PTR-encoded error value.
2934  */
hid_allocate_device(void)2935 struct hid_device *hid_allocate_device(void)
2936 {
2937 	struct hid_device *hdev;
2938 	int ret = -ENOMEM;
2939 
2940 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2941 	if (hdev == NULL)
2942 		return ERR_PTR(ret);
2943 
2944 	device_initialize(&hdev->dev);
2945 	hdev->dev.release = hid_device_release;
2946 	hdev->dev.bus = &hid_bus_type;
2947 	device_enable_async_suspend(&hdev->dev);
2948 
2949 	hid_close_report(hdev);
2950 
2951 	init_waitqueue_head(&hdev->debug_wait);
2952 	INIT_LIST_HEAD(&hdev->debug_list);
2953 	spin_lock_init(&hdev->debug_list_lock);
2954 	sema_init(&hdev->driver_input_lock, 1);
2955 	mutex_init(&hdev->ll_open_lock);
2956 	kref_init(&hdev->ref);
2957 
2958 	ret = hid_bpf_device_init(hdev);
2959 	if (ret)
2960 		goto out_err;
2961 
2962 	return hdev;
2963 
2964 out_err:
2965 	hid_destroy_device(hdev);
2966 	return ERR_PTR(ret);
2967 }
2968 EXPORT_SYMBOL_GPL(hid_allocate_device);
2969 
hid_remove_device(struct hid_device * hdev)2970 static void hid_remove_device(struct hid_device *hdev)
2971 {
2972 	if (hdev->status & HID_STAT_ADDED) {
2973 		device_del(&hdev->dev);
2974 		hid_debug_unregister(hdev);
2975 		hdev->status &= ~HID_STAT_ADDED;
2976 	}
2977 	hid_free_bpf_rdesc(hdev);
2978 	kfree(hdev->dev_rdesc);
2979 	hdev->dev_rdesc = NULL;
2980 	hdev->dev_rsize = 0;
2981 	hdev->bpf_rsize = 0;
2982 }
2983 
2984 /**
2985  * hid_destroy_device - free previously allocated device
2986  *
2987  * @hdev: hid device
2988  *
2989  * If you allocate a hid_device through hid_allocate_device, you should only
2990  * ever free it with this function.
2991  */
hid_destroy_device(struct hid_device * hdev)2992 void hid_destroy_device(struct hid_device *hdev)
2993 {
2994 	hid_bpf_destroy_device(hdev);
2995 	hid_remove_device(hdev);
2996 	put_device(&hdev->dev);
2997 }
2998 EXPORT_SYMBOL_GPL(hid_destroy_device);
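
/*
 * Illustrative sketch (not part of this file): the life cycle a transport
 * driver goes through with hid_allocate_device(), hid_add_device() and
 * hid_destroy_device(). The function name, the choice of BUS_VIRTUAL and
 * the bare minimum of fields filled in here are assumptions for the example.
 */
static __maybe_unused struct hid_device *
hid_example_create_transport_dev(const struct hid_ll_driver *ll,
				 __u32 vendor, __u32 product)
{
	struct hid_device *hdev;
	int ret;

	hdev = hid_allocate_device();
	if (IS_ERR(hdev))
		return hdev;

	hdev->ll_driver = ll;
	hdev->bus = BUS_VIRTUAL;
	hdev->vendor = vendor;
	hdev->product = product;

	ret = hid_add_device(hdev);
	if (ret) {
		hid_destroy_device(hdev);
		return ERR_PTR(ret);
	}

	return hdev;
}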
2999 
3000 
__hid_bus_reprobe_drivers(struct device * dev,void * data)3001 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
3002 {
3003 	struct hid_driver *hdrv = data;
3004 	struct hid_device *hdev = to_hid_device(dev);
3005 
3006 	if (hdev->driver == hdrv &&
3007 	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
3008 	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
3009 		return device_reprobe(dev);
3010 
3011 	return 0;
3012 }
3013 
__hid_bus_driver_added(struct device_driver * drv,void * data)3014 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3015 {
3016 	struct hid_driver *hdrv = to_hid_driver(drv);
3017 
3018 	if (hdrv->match) {
3019 		bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3020 				 __hid_bus_reprobe_drivers);
3021 	}
3022 
3023 	return 0;
3024 }
3025 
__bus_removed_driver(struct device_driver * drv,void * data)3026 static int __bus_removed_driver(struct device_driver *drv, void *data)
3027 {
3028 	return bus_rescan_devices(&hid_bus_type);
3029 }
3030 
__hid_register_driver(struct hid_driver * hdrv,struct module * owner,const char * mod_name)3031 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3032 		const char *mod_name)
3033 {
3034 	int ret;
3035 
3036 	hdrv->driver.name = hdrv->name;
3037 	hdrv->driver.bus = &hid_bus_type;
3038 	hdrv->driver.owner = owner;
3039 	hdrv->driver.mod_name = mod_name;
3040 
3041 	INIT_LIST_HEAD(&hdrv->dyn_list);
3042 	spin_lock_init(&hdrv->dyn_lock);
3043 
3044 	ret = driver_register(&hdrv->driver);
3045 
3046 	if (ret == 0)
3047 		bus_for_each_drv(&hid_bus_type, NULL, NULL,
3048 				 __hid_bus_driver_added);
3049 
3050 	return ret;
3051 }
3052 EXPORT_SYMBOL_GPL(__hid_register_driver);
3053 
hid_unregister_driver(struct hid_driver * hdrv)3054 void hid_unregister_driver(struct hid_driver *hdrv)
3055 {
3056 	driver_unregister(&hdrv->driver);
3057 	hid_free_dynids(hdrv);
3058 
3059 	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3060 }
3061 EXPORT_SYMBOL_GPL(hid_unregister_driver);
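
/*
 * Illustrative sketch (not part of this file): the minimal driver a module
 * would register through __hid_register_driver() (usually via the
 * module_hid_driver() helper). Without a .probe callback the default probe
 * in __hid_device_probe() parses the report descriptor and starts the
 * device. All names and IDs below are placeholders.
 */
static const struct hid_device_id hid_example_driver_ids[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* hypothetical device */
	{ }
};

static struct hid_driver hid_example_driver __maybe_unused = {
	.name		= "hid-example",
	.id_table	= hid_example_driver_ids,
};
/* a real module would then add: module_hid_driver(hid_example_driver); */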
3062 
hid_check_keys_pressed(struct hid_device * hid)3063 int hid_check_keys_pressed(struct hid_device *hid)
3064 {
3065 	struct hid_input *hidinput;
3066 	int i;
3067 
3068 	if (!(hid->claimed & HID_CLAIMED_INPUT))
3069 		return 0;
3070 
3071 	list_for_each_entry(hidinput, &hid->inputs, list) {
3072 		for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3073 			if (hidinput->input->key[i])
3074 				return 1;
3075 	}
3076 
3077 	return 0;
3078 }
3079 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3080 
3081 #ifdef CONFIG_HID_BPF
3082 static const struct hid_ops __hid_ops = {
3083 	.hid_get_report = hid_get_report,
3084 	.hid_hw_raw_request = __hid_hw_raw_request,
3085 	.hid_hw_output_report = __hid_hw_output_report,
3086 	.hid_input_report = __hid_input_report,
3087 	.owner = THIS_MODULE,
3088 	.bus_type = &hid_bus_type,
3089 };
3090 #endif
3091 
hid_init(void)3092 static int __init hid_init(void)
3093 {
3094 	int ret;
3095 
3096 	ret = bus_register(&hid_bus_type);
3097 	if (ret) {
3098 		pr_err("can't register hid bus\n");
3099 		goto err;
3100 	}
3101 
3102 #ifdef CONFIG_HID_BPF
3103 	hid_ops = &__hid_ops;
3104 #endif
3105 
3106 	ret = hidraw_init();
3107 	if (ret)
3108 		goto err_bus;
3109 
3110 	hid_debug_init();
3111 
3112 	return 0;
3113 err_bus:
3114 	bus_unregister(&hid_bus_type);
3115 err:
3116 	return ret;
3117 }
3118 
hid_exit(void)3119 static void __exit hid_exit(void)
3120 {
3121 #ifdef CONFIG_HID_BPF
3122 	hid_ops = NULL;
3123 #endif
3124 	hid_debug_exit();
3125 	hidraw_exit();
3126 	bus_unregister(&hid_bus_type);
3127 	hid_quirks_exit(HID_BUS_ANY);
3128 }
3129 
3130 module_init(hid_init);
3131 module_exit(hid_exit);
3132 
3133 MODULE_AUTHOR("Andreas Gal");
3134 MODULE_AUTHOR("Vojtech Pavlik");
3135 MODULE_AUTHOR("Jiri Kosina");
3136 MODULE_DESCRIPTION("HID support for Linux");
3137 MODULE_LICENSE("GPL");
3138