1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * HID support for Linux
4 *
5 * Copyright (c) 1999 Andreas Gal
6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8 * Copyright (c) 2006-2012 Jiri Kosina
9 */
10
11 /*
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35
36 #include "hid-ids.h"
37
38 /*
39 * Version Information
40 */
41
42 #define DRIVER_DESC "HID core driver"
43
44 static int hid_ignore_special_drivers = 0;
45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
47
48 /*
49 * Convert a signed n-bit integer to signed 32-bit integer.
50 */
51
snto32(__u32 value,unsigned int n)52 static s32 snto32(__u32 value, unsigned int n)
53 {
54 if (!value || !n)
55 return 0;
56
57 if (n > 32)
58 n = 32;
59
60 return sign_extend32(value, n - 1);
61 }
62
63 /*
64 * Convert a signed 32-bit integer to a signed n-bit integer.
65 */
66
s32ton(__s32 value,unsigned int n)67 static u32 s32ton(__s32 value, unsigned int n)
68 {
69 s32 a = value >> (n - 1);
70
71 if (a && a != -1)
72 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
73 return value & ((1 << n) - 1);
74 }
75
76 /*
77 * Register a new report for a device.
78 */
79
hid_register_report(struct hid_device * device,enum hid_report_type type,unsigned int id,unsigned int application)80 struct hid_report *hid_register_report(struct hid_device *device,
81 enum hid_report_type type, unsigned int id,
82 unsigned int application)
83 {
84 struct hid_report_enum *report_enum = device->report_enum + type;
85 struct hid_report *report;
86
87 if (id >= HID_MAX_IDS)
88 return NULL;
89 if (report_enum->report_id_hash[id])
90 return report_enum->report_id_hash[id];
91
92 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
93 if (!report)
94 return NULL;
95
96 if (id != 0)
97 report_enum->numbered = 1;
98
99 report->id = id;
100 report->type = type;
101 report->size = 0;
102 report->device = device;
103 report->application = application;
104 report_enum->report_id_hash[id] = report;
105
106 list_add_tail(&report->list, &report_enum->report_list);
107 INIT_LIST_HEAD(&report->field_entry_list);
108
109 return report;
110 }
111 EXPORT_SYMBOL_GPL(hid_register_report);
112
113 /*
114 * Register a new field for this report.
115 */
116
hid_register_field(struct hid_report * report,unsigned usages)117 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
118 {
119 struct hid_field *field;
120
121 if (report->maxfield == HID_MAX_FIELDS) {
122 hid_err(report->device, "too many fields in report\n");
123 return NULL;
124 }
125
126 field = kvzalloc((sizeof(struct hid_field) +
127 usages * sizeof(struct hid_usage) +
128 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
129 if (!field)
130 return NULL;
131
132 field->index = report->maxfield++;
133 report->field[field->index] = field;
134 field->usage = (struct hid_usage *)(field + 1);
135 field->value = (s32 *)(field->usage + usages);
136 field->new_value = (s32 *)(field->value + usages);
137 field->usages_priorities = (s32 *)(field->new_value + usages);
138 field->report = report;
139
140 return field;
141 }
142
143 /*
144 * Open a collection. The type/usage is pushed on the stack.
145 */
146
open_collection(struct hid_parser * parser,unsigned type)147 static int open_collection(struct hid_parser *parser, unsigned type)
148 {
149 struct hid_collection *collection;
150 unsigned usage;
151 int collection_index;
152
153 usage = parser->local.usage[0];
154
155 if (parser->collection_stack_ptr == parser->collection_stack_size) {
156 unsigned int *collection_stack;
157 unsigned int new_size = parser->collection_stack_size +
158 HID_COLLECTION_STACK_SIZE;
159
160 collection_stack = krealloc(parser->collection_stack,
161 new_size * sizeof(unsigned int),
162 GFP_KERNEL);
163 if (!collection_stack)
164 return -ENOMEM;
165
166 parser->collection_stack = collection_stack;
167 parser->collection_stack_size = new_size;
168 }
169
170 if (parser->device->maxcollection == parser->device->collection_size) {
171 collection = kmalloc(
172 array3_size(sizeof(struct hid_collection),
173 parser->device->collection_size,
174 2),
175 GFP_KERNEL);
176 if (collection == NULL) {
177 hid_err(parser->device, "failed to reallocate collection array\n");
178 return -ENOMEM;
179 }
180 memcpy(collection, parser->device->collection,
181 sizeof(struct hid_collection) *
182 parser->device->collection_size);
183 memset(collection + parser->device->collection_size, 0,
184 sizeof(struct hid_collection) *
185 parser->device->collection_size);
186 kfree(parser->device->collection);
187 parser->device->collection = collection;
188 parser->device->collection_size *= 2;
189 }
190
191 parser->collection_stack[parser->collection_stack_ptr++] =
192 parser->device->maxcollection;
193
194 collection_index = parser->device->maxcollection++;
195 collection = parser->device->collection + collection_index;
196 collection->type = type;
197 collection->usage = usage;
198 collection->level = parser->collection_stack_ptr - 1;
199 collection->parent_idx = (collection->level == 0) ? -1 :
200 parser->collection_stack[collection->level - 1];
201
202 if (type == HID_COLLECTION_APPLICATION)
203 parser->device->maxapplication++;
204
205 return 0;
206 }
207
208 /*
209 * Close a collection.
210 */
211
close_collection(struct hid_parser * parser)212 static int close_collection(struct hid_parser *parser)
213 {
214 if (!parser->collection_stack_ptr) {
215 hid_err(parser->device, "collection stack underflow\n");
216 return -EINVAL;
217 }
218 parser->collection_stack_ptr--;
219 return 0;
220 }
221
222 /*
223 * Climb up the stack, search for the specified collection type
224 * and return the usage.
225 */
226
hid_lookup_collection(struct hid_parser * parser,unsigned type)227 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
228 {
229 struct hid_collection *collection = parser->device->collection;
230 int n;
231
232 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
233 unsigned index = parser->collection_stack[n];
234 if (collection[index].type == type)
235 return collection[index].usage;
236 }
237 return 0; /* we know nothing about this usage type */
238 }
239
240 /*
241 * Concatenate usage which defines 16 bits or less with the
242 * currently defined usage page to form a 32 bit usage
243 */
244
complete_usage(struct hid_parser * parser,unsigned int index)245 static void complete_usage(struct hid_parser *parser, unsigned int index)
246 {
247 parser->local.usage[index] &= 0xFFFF;
248 parser->local.usage[index] |=
249 (parser->global.usage_page & 0xFFFF) << 16;
250 }
251
252 /*
253 * Add a usage to the temporary parser table.
254 */
255
hid_add_usage(struct hid_parser * parser,unsigned usage,u8 size)256 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
257 {
258 if (parser->local.usage_index >= HID_MAX_USAGES) {
259 hid_err(parser->device, "usage index exceeded\n");
260 return -1;
261 }
262 parser->local.usage[parser->local.usage_index] = usage;
263
264 /*
265 * If Usage item only includes usage id, concatenate it with
266 * currently defined usage page
267 */
268 if (size <= 2)
269 complete_usage(parser, parser->local.usage_index);
270
271 parser->local.usage_size[parser->local.usage_index] = size;
272 parser->local.collection_index[parser->local.usage_index] =
273 parser->collection_stack_ptr ?
274 parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
275 parser->local.usage_index++;
276 return 0;
277 }
278
279 /*
280 * Register a new field for this report.
281 */
282
hid_add_field(struct hid_parser * parser,unsigned report_type,unsigned flags)283 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
284 {
285 struct hid_report *report;
286 struct hid_field *field;
287 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
288 unsigned int usages;
289 unsigned int offset;
290 unsigned int i;
291 unsigned int application;
292
293 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
294
295 report = hid_register_report(parser->device, report_type,
296 parser->global.report_id, application);
297 if (!report) {
298 hid_err(parser->device, "hid_register_report failed\n");
299 return -1;
300 }
301
302 /* Handle both signed and unsigned cases properly */
303 if ((parser->global.logical_minimum < 0 &&
304 parser->global.logical_maximum <
305 parser->global.logical_minimum) ||
306 (parser->global.logical_minimum >= 0 &&
307 (__u32)parser->global.logical_maximum <
308 (__u32)parser->global.logical_minimum)) {
309 dbg_hid("logical range invalid 0x%x 0x%x\n",
310 parser->global.logical_minimum,
311 parser->global.logical_maximum);
312 return -1;
313 }
314
315 offset = report->size;
316 report->size += parser->global.report_size * parser->global.report_count;
317
318 if (parser->device->ll_driver->max_buffer_size)
319 max_buffer_size = parser->device->ll_driver->max_buffer_size;
320
321 /* Total size check: Allow for possible report index byte */
322 if (report->size > (max_buffer_size - 1) << 3) {
323 hid_err(parser->device, "report is too long\n");
324 return -1;
325 }
326
327 if (!parser->local.usage_index) /* Ignore padding fields */
328 return 0;
329
330 usages = max_t(unsigned, parser->local.usage_index,
331 parser->global.report_count);
332
333 field = hid_register_field(report, usages);
334 if (!field)
335 return 0;
336
337 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
338 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
339 field->application = application;
340
341 for (i = 0; i < usages; i++) {
342 unsigned j = i;
343 /* Duplicate the last usage we parsed if we have excess values */
344 if (i >= parser->local.usage_index)
345 j = parser->local.usage_index - 1;
346 field->usage[i].hid = parser->local.usage[j];
347 field->usage[i].collection_index =
348 parser->local.collection_index[j];
349 field->usage[i].usage_index = i;
350 field->usage[i].resolution_multiplier = 1;
351 }
352
353 field->maxusage = usages;
354 field->flags = flags;
355 field->report_offset = offset;
356 field->report_type = report_type;
357 field->report_size = parser->global.report_size;
358 field->report_count = parser->global.report_count;
359 field->logical_minimum = parser->global.logical_minimum;
360 field->logical_maximum = parser->global.logical_maximum;
361 field->physical_minimum = parser->global.physical_minimum;
362 field->physical_maximum = parser->global.physical_maximum;
363 field->unit_exponent = parser->global.unit_exponent;
364 field->unit = parser->global.unit;
365
366 return 0;
367 }
368
369 /*
370 * Read data value from item.
371 */
372
item_udata(struct hid_item * item)373 static u32 item_udata(struct hid_item *item)
374 {
375 switch (item->size) {
376 case 1: return item->data.u8;
377 case 2: return item->data.u16;
378 case 4: return item->data.u32;
379 }
380 return 0;
381 }
382
item_sdata(struct hid_item * item)383 static s32 item_sdata(struct hid_item *item)
384 {
385 switch (item->size) {
386 case 1: return item->data.s8;
387 case 2: return item->data.s16;
388 case 4: return item->data.s32;
389 }
390 return 0;
391 }
392
393 /*
394 * Process a global item.
395 */
396
hid_parser_global(struct hid_parser * parser,struct hid_item * item)397 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
398 {
399 __s32 raw_value;
400 switch (item->tag) {
401 case HID_GLOBAL_ITEM_TAG_PUSH:
402
403 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
404 hid_err(parser->device, "global environment stack overflow\n");
405 return -1;
406 }
407
408 memcpy(parser->global_stack + parser->global_stack_ptr++,
409 &parser->global, sizeof(struct hid_global));
410 return 0;
411
412 case HID_GLOBAL_ITEM_TAG_POP:
413
414 if (!parser->global_stack_ptr) {
415 hid_err(parser->device, "global environment stack underflow\n");
416 return -1;
417 }
418
419 memcpy(&parser->global, parser->global_stack +
420 --parser->global_stack_ptr, sizeof(struct hid_global));
421 return 0;
422
423 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
424 parser->global.usage_page = item_udata(item);
425 return 0;
426
427 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
428 parser->global.logical_minimum = item_sdata(item);
429 return 0;
430
431 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
432 if (parser->global.logical_minimum < 0)
433 parser->global.logical_maximum = item_sdata(item);
434 else
435 parser->global.logical_maximum = item_udata(item);
436 return 0;
437
438 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
439 parser->global.physical_minimum = item_sdata(item);
440 return 0;
441
442 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
443 if (parser->global.physical_minimum < 0)
444 parser->global.physical_maximum = item_sdata(item);
445 else
446 parser->global.physical_maximum = item_udata(item);
447 return 0;
448
449 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
450 /* Many devices provide unit exponent as a two's complement
451 * nibble due to the common misunderstanding of HID
452 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
453 * both this and the standard encoding. */
454 raw_value = item_sdata(item);
455 if (!(raw_value & 0xfffffff0))
456 parser->global.unit_exponent = snto32(raw_value, 4);
457 else
458 parser->global.unit_exponent = raw_value;
459 return 0;
460
461 case HID_GLOBAL_ITEM_TAG_UNIT:
462 parser->global.unit = item_udata(item);
463 return 0;
464
465 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
466 parser->global.report_size = item_udata(item);
467 if (parser->global.report_size > 256) {
468 hid_err(parser->device, "invalid report_size %d\n",
469 parser->global.report_size);
470 return -1;
471 }
472 return 0;
473
474 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
475 parser->global.report_count = item_udata(item);
476 if (parser->global.report_count > HID_MAX_USAGES) {
477 hid_err(parser->device, "invalid report_count %d\n",
478 parser->global.report_count);
479 return -1;
480 }
481 return 0;
482
483 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
484 parser->global.report_id = item_udata(item);
485 if (parser->global.report_id == 0 ||
486 parser->global.report_id >= HID_MAX_IDS) {
487 hid_err(parser->device, "report_id %u is invalid\n",
488 parser->global.report_id);
489 return -1;
490 }
491 return 0;
492
493 default:
494 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
495 return -1;
496 }
497 }
498
499 /*
500 * Process a local item.
501 */
502
hid_parser_local(struct hid_parser * parser,struct hid_item * item)503 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
504 {
505 __u32 data;
506 unsigned n;
507 __u32 count;
508
509 data = item_udata(item);
510
511 switch (item->tag) {
512 case HID_LOCAL_ITEM_TAG_DELIMITER:
513
514 if (data) {
515 /*
516 * We treat items before the first delimiter
517 * as global to all usage sets (branch 0).
518 * In the moment we process only these global
519 * items and the first delimiter set.
520 */
521 if (parser->local.delimiter_depth != 0) {
522 hid_err(parser->device, "nested delimiters\n");
523 return -1;
524 }
525 parser->local.delimiter_depth++;
526 parser->local.delimiter_branch++;
527 } else {
528 if (parser->local.delimiter_depth < 1) {
529 hid_err(parser->device, "bogus close delimiter\n");
530 return -1;
531 }
532 parser->local.delimiter_depth--;
533 }
534 return 0;
535
536 case HID_LOCAL_ITEM_TAG_USAGE:
537
538 if (parser->local.delimiter_branch > 1) {
539 dbg_hid("alternative usage ignored\n");
540 return 0;
541 }
542
543 return hid_add_usage(parser, data, item->size);
544
545 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
546
547 if (parser->local.delimiter_branch > 1) {
548 dbg_hid("alternative usage ignored\n");
549 return 0;
550 }
551
552 parser->local.usage_minimum = data;
553 return 0;
554
555 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
556
557 if (parser->local.delimiter_branch > 1) {
558 dbg_hid("alternative usage ignored\n");
559 return 0;
560 }
561
562 count = data - parser->local.usage_minimum;
563 if (count + parser->local.usage_index >= HID_MAX_USAGES) {
564 /*
565 * We do not warn if the name is not set, we are
566 * actually pre-scanning the device.
567 */
568 if (dev_name(&parser->device->dev))
569 hid_warn(parser->device,
570 "ignoring exceeding usage max\n");
571 data = HID_MAX_USAGES - parser->local.usage_index +
572 parser->local.usage_minimum - 1;
573 if (data <= 0) {
574 hid_err(parser->device,
575 "no more usage index available\n");
576 return -1;
577 }
578 }
579
580 for (n = parser->local.usage_minimum; n <= data; n++)
581 if (hid_add_usage(parser, n, item->size)) {
582 dbg_hid("hid_add_usage failed\n");
583 return -1;
584 }
585 return 0;
586
587 default:
588
589 dbg_hid("unknown local item tag 0x%x\n", item->tag);
590 return 0;
591 }
592 return 0;
593 }
594
595 /*
596 * Concatenate Usage Pages into Usages where relevant:
597 * As per specification, 6.2.2.8: "When the parser encounters a main item it
598 * concatenates the last declared Usage Page with a Usage to form a complete
599 * usage value."
600 */
601
hid_concatenate_last_usage_page(struct hid_parser * parser)602 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
603 {
604 int i;
605 unsigned int usage_page;
606 unsigned int current_page;
607
608 if (!parser->local.usage_index)
609 return;
610
611 usage_page = parser->global.usage_page;
612
613 /*
614 * Concatenate usage page again only if last declared Usage Page
615 * has not been already used in previous usages concatenation
616 */
617 for (i = parser->local.usage_index - 1; i >= 0; i--) {
618 if (parser->local.usage_size[i] > 2)
619 /* Ignore extended usages */
620 continue;
621
622 current_page = parser->local.usage[i] >> 16;
623 if (current_page == usage_page)
624 break;
625
626 complete_usage(parser, i);
627 }
628 }
629
630 /*
631 * Process a main item.
632 */
633
hid_parser_main(struct hid_parser * parser,struct hid_item * item)634 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
635 {
636 __u32 data;
637 int ret;
638
639 hid_concatenate_last_usage_page(parser);
640
641 data = item_udata(item);
642
643 switch (item->tag) {
644 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
645 ret = open_collection(parser, data & 0xff);
646 break;
647 case HID_MAIN_ITEM_TAG_END_COLLECTION:
648 ret = close_collection(parser);
649 break;
650 case HID_MAIN_ITEM_TAG_INPUT:
651 ret = hid_add_field(parser, HID_INPUT_REPORT, data);
652 break;
653 case HID_MAIN_ITEM_TAG_OUTPUT:
654 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
655 break;
656 case HID_MAIN_ITEM_TAG_FEATURE:
657 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
658 break;
659 default:
660 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
661 ret = 0;
662 }
663
664 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
665
666 return ret;
667 }
668
669 /*
670 * Process a reserved item.
671 */
672
hid_parser_reserved(struct hid_parser * parser,struct hid_item * item)673 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
674 {
675 dbg_hid("reserved item type, tag 0x%x\n", item->tag);
676 return 0;
677 }
678
679 /*
680 * Free a report and all registered fields. The field->usage and
681 * field->value table's are allocated behind the field, so we need
682 * only to free(field) itself.
683 */
684
hid_free_report(struct hid_report * report)685 static void hid_free_report(struct hid_report *report)
686 {
687 unsigned n;
688
689 kfree(report->field_entries);
690
691 for (n = 0; n < report->maxfield; n++)
692 kvfree(report->field[n]);
693 kfree(report);
694 }
695
696 /*
697 * Close report. This function returns the device
698 * state to the point prior to hid_open_report().
699 */
hid_close_report(struct hid_device * device)700 static void hid_close_report(struct hid_device *device)
701 {
702 unsigned i, j;
703
704 for (i = 0; i < HID_REPORT_TYPES; i++) {
705 struct hid_report_enum *report_enum = device->report_enum + i;
706
707 for (j = 0; j < HID_MAX_IDS; j++) {
708 struct hid_report *report = report_enum->report_id_hash[j];
709 if (report)
710 hid_free_report(report);
711 }
712 memset(report_enum, 0, sizeof(*report_enum));
713 INIT_LIST_HEAD(&report_enum->report_list);
714 }
715
716 /*
717 * If the HID driver had a rdesc_fixup() callback, dev->rdesc
718 * will be allocated by hid-core and needs to be freed.
719 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
720 * which cases it'll be freed later on device removal or destroy.
721 */
722 if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
723 kfree(device->rdesc);
724 device->rdesc = NULL;
725 device->rsize = 0;
726
727 kfree(device->collection);
728 device->collection = NULL;
729 device->collection_size = 0;
730 device->maxcollection = 0;
731 device->maxapplication = 0;
732
733 device->status &= ~HID_STAT_PARSED;
734 }
735
hid_free_bpf_rdesc(struct hid_device * hdev)736 static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
737 {
738 /* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
739 if (hdev->bpf_rdesc != hdev->dev_rdesc)
740 kfree(hdev->bpf_rdesc);
741 hdev->bpf_rdesc = NULL;
742 }
743
744 /*
745 * Free a device structure, all reports, and all fields.
746 */
747
hiddev_free(struct kref * ref)748 void hiddev_free(struct kref *ref)
749 {
750 struct hid_device *hid = container_of(ref, struct hid_device, ref);
751
752 hid_close_report(hid);
753 hid_free_bpf_rdesc(hid);
754 kfree(hid->dev_rdesc);
755 kfree(hid);
756 }
757
hid_device_release(struct device * dev)758 static void hid_device_release(struct device *dev)
759 {
760 struct hid_device *hid = to_hid_device(dev);
761
762 kref_put(&hid->ref, hiddev_free);
763 }
764
765 /*
766 * Fetch a report description item from the data stream. We support long
767 * items, though they are not used yet.
768 */
769
fetch_item(const __u8 * start,const __u8 * end,struct hid_item * item)770 static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
771 {
772 u8 b;
773
774 if ((end - start) <= 0)
775 return NULL;
776
777 b = *start++;
778
779 item->type = (b >> 2) & 3;
780 item->tag = (b >> 4) & 15;
781
782 if (item->tag == HID_ITEM_TAG_LONG) {
783
784 item->format = HID_ITEM_FORMAT_LONG;
785
786 if ((end - start) < 2)
787 return NULL;
788
789 item->size = *start++;
790 item->tag = *start++;
791
792 if ((end - start) < item->size)
793 return NULL;
794
795 item->data.longdata = start;
796 start += item->size;
797 return start;
798 }
799
800 item->format = HID_ITEM_FORMAT_SHORT;
801 item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */
802
803 if (end - start < item->size)
804 return NULL;
805
806 switch (item->size) {
807 case 0:
808 break;
809
810 case 1:
811 item->data.u8 = *start;
812 break;
813
814 case 2:
815 item->data.u16 = get_unaligned_le16(start);
816 break;
817
818 case 4:
819 item->data.u32 = get_unaligned_le32(start);
820 break;
821 }
822
823 return start + item->size;
824 }
825
hid_scan_input_usage(struct hid_parser * parser,u32 usage)826 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
827 {
828 struct hid_device *hid = parser->device;
829
830 if (usage == HID_DG_CONTACTID)
831 hid->group = HID_GROUP_MULTITOUCH;
832 }
833
hid_scan_feature_usage(struct hid_parser * parser,u32 usage)834 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
835 {
836 if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
837 parser->global.report_size == 8)
838 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
839
840 if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
841 parser->global.report_size == 8)
842 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
843 }
844
hid_scan_collection(struct hid_parser * parser,unsigned type)845 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
846 {
847 struct hid_device *hid = parser->device;
848 int i;
849
850 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
851 (type == HID_COLLECTION_PHYSICAL ||
852 type == HID_COLLECTION_APPLICATION))
853 hid->group = HID_GROUP_SENSOR_HUB;
854
855 if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
856 hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
857 hid->group == HID_GROUP_MULTITOUCH)
858 hid->group = HID_GROUP_GENERIC;
859
860 if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
861 for (i = 0; i < parser->local.usage_index; i++)
862 if (parser->local.usage[i] == HID_GD_POINTER)
863 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
864
865 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
866 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
867
868 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
869 for (i = 0; i < parser->local.usage_index; i++)
870 if (parser->local.usage[i] ==
871 (HID_UP_GOOGLEVENDOR | 0x0001))
872 parser->device->group =
873 HID_GROUP_VIVALDI;
874 }
875
hid_scan_main(struct hid_parser * parser,struct hid_item * item)876 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
877 {
878 __u32 data;
879 int i;
880
881 hid_concatenate_last_usage_page(parser);
882
883 data = item_udata(item);
884
885 switch (item->tag) {
886 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
887 hid_scan_collection(parser, data & 0xff);
888 break;
889 case HID_MAIN_ITEM_TAG_END_COLLECTION:
890 break;
891 case HID_MAIN_ITEM_TAG_INPUT:
892 /* ignore constant inputs, they will be ignored by hid-input */
893 if (data & HID_MAIN_ITEM_CONSTANT)
894 break;
895 for (i = 0; i < parser->local.usage_index; i++)
896 hid_scan_input_usage(parser, parser->local.usage[i]);
897 break;
898 case HID_MAIN_ITEM_TAG_OUTPUT:
899 break;
900 case HID_MAIN_ITEM_TAG_FEATURE:
901 for (i = 0; i < parser->local.usage_index; i++)
902 hid_scan_feature_usage(parser, parser->local.usage[i]);
903 break;
904 }
905
906 /* Reset the local parser environment */
907 memset(&parser->local, 0, sizeof(parser->local));
908
909 return 0;
910 }
911
912 /*
913 * Scan a report descriptor before the device is added to the bus.
914 * Sets device groups and other properties that determine what driver
915 * to load.
916 */
hid_scan_report(struct hid_device * hid)917 static int hid_scan_report(struct hid_device *hid)
918 {
919 struct hid_parser *parser;
920 struct hid_item item;
921 const __u8 *start = hid->dev_rdesc;
922 const __u8 *end = start + hid->dev_rsize;
923 static int (*dispatch_type[])(struct hid_parser *parser,
924 struct hid_item *item) = {
925 hid_scan_main,
926 hid_parser_global,
927 hid_parser_local,
928 hid_parser_reserved
929 };
930
931 parser = vzalloc(sizeof(struct hid_parser));
932 if (!parser)
933 return -ENOMEM;
934
935 parser->device = hid;
936 hid->group = HID_GROUP_GENERIC;
937
938 /*
939 * The parsing is simpler than the one in hid_open_report() as we should
940 * be robust against hid errors. Those errors will be raised by
941 * hid_open_report() anyway.
942 */
943 while ((start = fetch_item(start, end, &item)) != NULL)
944 dispatch_type[item.type](parser, &item);
945
946 /*
947 * Handle special flags set during scanning.
948 */
949 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
950 (hid->group == HID_GROUP_MULTITOUCH))
951 hid->group = HID_GROUP_MULTITOUCH_WIN_8;
952
953 /*
954 * Vendor specific handlings
955 */
956 switch (hid->vendor) {
957 case USB_VENDOR_ID_WACOM:
958 hid->group = HID_GROUP_WACOM;
959 break;
960 case USB_VENDOR_ID_SYNAPTICS:
961 if (hid->group == HID_GROUP_GENERIC)
962 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
963 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
964 /*
965 * hid-rmi should take care of them,
966 * not hid-generic
967 */
968 hid->group = HID_GROUP_RMI;
969 break;
970 }
971
972 kfree(parser->collection_stack);
973 vfree(parser);
974 return 0;
975 }
976
977 /**
978 * hid_parse_report - parse device report
979 *
980 * @hid: hid device
981 * @start: report start
982 * @size: report size
983 *
984 * Allocate the device report as read by the bus driver. This function should
985 * only be called from parse() in ll drivers.
986 */
hid_parse_report(struct hid_device * hid,const __u8 * start,unsigned size)987 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
988 {
989 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
990 if (!hid->dev_rdesc)
991 return -ENOMEM;
992 hid->dev_rsize = size;
993 return 0;
994 }
995 EXPORT_SYMBOL_GPL(hid_parse_report);
996
997 static const char * const hid_report_names[] = {
998 "HID_INPUT_REPORT",
999 "HID_OUTPUT_REPORT",
1000 "HID_FEATURE_REPORT",
1001 };
1002 /**
1003 * hid_validate_values - validate existing device report's value indexes
1004 *
1005 * @hid: hid device
1006 * @type: which report type to examine
1007 * @id: which report ID to examine (0 for first)
1008 * @field_index: which report field to examine
1009 * @report_counts: expected number of values
1010 *
1011 * Validate the number of values in a given field of a given report, after
1012 * parsing.
1013 */
hid_validate_values(struct hid_device * hid,enum hid_report_type type,unsigned int id,unsigned int field_index,unsigned int report_counts)1014 struct hid_report *hid_validate_values(struct hid_device *hid,
1015 enum hid_report_type type, unsigned int id,
1016 unsigned int field_index,
1017 unsigned int report_counts)
1018 {
1019 struct hid_report *report;
1020
1021 if (type > HID_FEATURE_REPORT) {
1022 hid_err(hid, "invalid HID report type %u\n", type);
1023 return NULL;
1024 }
1025
1026 if (id >= HID_MAX_IDS) {
1027 hid_err(hid, "invalid HID report id %u\n", id);
1028 return NULL;
1029 }
1030
1031 /*
1032 * Explicitly not using hid_get_report() here since it depends on
1033 * ->numbered being checked, which may not always be the case when
1034 * drivers go to access report values.
1035 */
1036 if (id == 0) {
1037 /*
1038 * Validating on id 0 means we should examine the first
1039 * report in the list.
1040 */
1041 report = list_first_entry_or_null(
1042 &hid->report_enum[type].report_list,
1043 struct hid_report, list);
1044 } else {
1045 report = hid->report_enum[type].report_id_hash[id];
1046 }
1047 if (!report) {
1048 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1049 return NULL;
1050 }
1051 if (report->maxfield <= field_index) {
1052 hid_err(hid, "not enough fields in %s %u\n",
1053 hid_report_names[type], id);
1054 return NULL;
1055 }
1056 if (report->field[field_index]->report_count < report_counts) {
1057 hid_err(hid, "not enough values in %s %u field %u\n",
1058 hid_report_names[type], id, field_index);
1059 return NULL;
1060 }
1061 return report;
1062 }
1063 EXPORT_SYMBOL_GPL(hid_validate_values);
1064
hid_calculate_multiplier(struct hid_device * hid,struct hid_field * multiplier)1065 static int hid_calculate_multiplier(struct hid_device *hid,
1066 struct hid_field *multiplier)
1067 {
1068 int m;
1069 __s32 v = *multiplier->value;
1070 __s32 lmin = multiplier->logical_minimum;
1071 __s32 lmax = multiplier->logical_maximum;
1072 __s32 pmin = multiplier->physical_minimum;
1073 __s32 pmax = multiplier->physical_maximum;
1074
1075 /*
1076 * "Because OS implementations will generally divide the control's
1077 * reported count by the Effective Resolution Multiplier, designers
1078 * should take care not to establish a potential Effective
1079 * Resolution Multiplier of zero."
1080 * HID Usage Table, v1.12, Section 4.3.1, p31
1081 */
1082 if (lmax - lmin == 0)
1083 return 1;
1084 /*
1085 * Handling the unit exponent is left as an exercise to whoever
1086 * finds a device where that exponent is not 0.
1087 */
1088 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1089 if (unlikely(multiplier->unit_exponent != 0)) {
1090 hid_warn(hid,
1091 "unsupported Resolution Multiplier unit exponent %d\n",
1092 multiplier->unit_exponent);
1093 }
1094
1095 /* There are no devices with an effective multiplier > 255 */
1096 if (unlikely(m == 0 || m > 255 || m < -255)) {
1097 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1098 m = 1;
1099 }
1100
1101 return m;
1102 }
1103
hid_apply_multiplier_to_field(struct hid_device * hid,struct hid_field * field,struct hid_collection * multiplier_collection,int effective_multiplier)1104 static void hid_apply_multiplier_to_field(struct hid_device *hid,
1105 struct hid_field *field,
1106 struct hid_collection *multiplier_collection,
1107 int effective_multiplier)
1108 {
1109 struct hid_collection *collection;
1110 struct hid_usage *usage;
1111 int i;
1112
1113 /*
1114 * If multiplier_collection is NULL, the multiplier applies
1115 * to all fields in the report.
1116 * Otherwise, it is the Logical Collection the multiplier applies to
1117 * but our field may be in a subcollection of that collection.
1118 */
1119 for (i = 0; i < field->maxusage; i++) {
1120 usage = &field->usage[i];
1121
1122 collection = &hid->collection[usage->collection_index];
1123 while (collection->parent_idx != -1 &&
1124 collection != multiplier_collection)
1125 collection = &hid->collection[collection->parent_idx];
1126
1127 if (collection->parent_idx != -1 ||
1128 multiplier_collection == NULL)
1129 usage->resolution_multiplier = effective_multiplier;
1130
1131 }
1132 }
1133
hid_apply_multiplier(struct hid_device * hid,struct hid_field * multiplier)1134 static void hid_apply_multiplier(struct hid_device *hid,
1135 struct hid_field *multiplier)
1136 {
1137 struct hid_report_enum *rep_enum;
1138 struct hid_report *rep;
1139 struct hid_field *field;
1140 struct hid_collection *multiplier_collection;
1141 int effective_multiplier;
1142 int i;
1143
1144 /*
1145 * "The Resolution Multiplier control must be contained in the same
1146 * Logical Collection as the control(s) to which it is to be applied.
1147 * If no Resolution Multiplier is defined, then the Resolution
1148 * Multiplier defaults to 1. If more than one control exists in a
1149 * Logical Collection, the Resolution Multiplier is associated with
1150 * all controls in the collection. If no Logical Collection is
1151 * defined, the Resolution Multiplier is associated with all
1152 * controls in the report."
1153 * HID Usage Table, v1.12, Section 4.3.1, p30
1154 *
1155 * Thus, search from the current collection upwards until we find a
1156 * logical collection. Then search all fields for that same parent
1157 * collection. Those are the fields the multiplier applies to.
1158 *
1159 * If we have more than one multiplier, it will overwrite the
1160 * applicable fields later.
1161 */
1162 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1163 while (multiplier_collection->parent_idx != -1 &&
1164 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1165 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1166
1167 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1168
1169 rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1170 list_for_each_entry(rep, &rep_enum->report_list, list) {
1171 for (i = 0; i < rep->maxfield; i++) {
1172 field = rep->field[i];
1173 hid_apply_multiplier_to_field(hid, field,
1174 multiplier_collection,
1175 effective_multiplier);
1176 }
1177 }
1178 }
1179
1180 /*
1181 * hid_setup_resolution_multiplier - set up all resolution multipliers
1182 *
1183 * @device: hid device
1184 *
1185 * Search for all Resolution Multiplier Feature Reports and apply their
1186 * value to all matching Input items. This only updates the internal struct
1187 * fields.
1188 *
1189 * The Resolution Multiplier is applied by the hardware. If the multiplier
1190 * is anything other than 1, the hardware will send pre-multiplied events
1191 * so that the same physical interaction generates an accumulated
1192 * accumulated_value = value * * multiplier
1193 * This may be achieved by sending
1194 * - "value * multiplier" for each event, or
1195 * - "value" but "multiplier" times as frequently, or
1196 * - a combination of the above
1197 * The only guarantee is that the same physical interaction always generates
1198 * an accumulated 'value * multiplier'.
1199 *
1200 * This function must be called before any event processing and after
1201 * any SetRequest to the Resolution Multiplier.
1202 */
hid_setup_resolution_multiplier(struct hid_device * hid)1203 void hid_setup_resolution_multiplier(struct hid_device *hid)
1204 {
1205 struct hid_report_enum *rep_enum;
1206 struct hid_report *rep;
1207 struct hid_usage *usage;
1208 int i, j;
1209
1210 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1211 list_for_each_entry(rep, &rep_enum->report_list, list) {
1212 for (i = 0; i < rep->maxfield; i++) {
1213 /* Ignore if report count is out of bounds. */
1214 if (rep->field[i]->report_count < 1)
1215 continue;
1216
1217 for (j = 0; j < rep->field[i]->maxusage; j++) {
1218 usage = &rep->field[i]->usage[j];
1219 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1220 hid_apply_multiplier(hid,
1221 rep->field[i]);
1222 }
1223 }
1224 }
1225 }
1226 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1227
1228 /**
1229 * hid_open_report - open a driver-specific device report
1230 *
1231 * @device: hid device
1232 *
1233 * Parse a report description into a hid_device structure. Reports are
1234 * enumerated, fields are attached to these reports.
1235 * 0 returned on success, otherwise nonzero error value.
1236 *
1237 * This function (or the equivalent hid_parse() macro) should only be
1238 * called from probe() in drivers, before starting the device.
1239 */
hid_open_report(struct hid_device * device)1240 int hid_open_report(struct hid_device *device)
1241 {
1242 struct hid_parser *parser;
1243 struct hid_item item;
1244 unsigned int size;
1245 const __u8 *start;
1246 const __u8 *end;
1247 const __u8 *next;
1248 int ret;
1249 int i;
1250 static int (*dispatch_type[])(struct hid_parser *parser,
1251 struct hid_item *item) = {
1252 hid_parser_main,
1253 hid_parser_global,
1254 hid_parser_local,
1255 hid_parser_reserved
1256 };
1257
1258 if (WARN_ON(device->status & HID_STAT_PARSED))
1259 return -EBUSY;
1260
1261 start = device->bpf_rdesc;
1262 if (WARN_ON(!start))
1263 return -ENODEV;
1264 size = device->bpf_rsize;
1265
1266 if (device->driver->report_fixup) {
1267 /*
1268 * device->driver->report_fixup() needs to work
1269 * on a copy of our report descriptor so it can
1270 * change it.
1271 */
1272 __u8 *buf = kmemdup(start, size, GFP_KERNEL);
1273
1274 if (buf == NULL)
1275 return -ENOMEM;
1276
1277 start = device->driver->report_fixup(device, buf, &size);
1278
1279 /*
1280 * The second kmemdup is required in case report_fixup() returns
1281 * a static read-only memory, but we have no idea if that memory
1282 * needs to be cleaned up or not at the end.
1283 */
1284 start = kmemdup(start, size, GFP_KERNEL);
1285 kfree(buf);
1286 if (start == NULL)
1287 return -ENOMEM;
1288 }
1289
1290 device->rdesc = start;
1291 device->rsize = size;
1292
1293 parser = vzalloc(sizeof(struct hid_parser));
1294 if (!parser) {
1295 ret = -ENOMEM;
1296 goto alloc_err;
1297 }
1298
1299 parser->device = device;
1300
1301 end = start + size;
1302
1303 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1304 sizeof(struct hid_collection), GFP_KERNEL);
1305 if (!device->collection) {
1306 ret = -ENOMEM;
1307 goto err;
1308 }
1309 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1310 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1311 device->collection[i].parent_idx = -1;
1312
1313 ret = -EINVAL;
1314 while ((next = fetch_item(start, end, &item)) != NULL) {
1315 start = next;
1316
1317 if (item.format != HID_ITEM_FORMAT_SHORT) {
1318 hid_err(device, "unexpected long global item\n");
1319 goto err;
1320 }
1321
1322 if (dispatch_type[item.type](parser, &item)) {
1323 hid_err(device, "item %u %u %u %u parsing failed\n",
1324 item.format, (unsigned)item.size,
1325 (unsigned)item.type, (unsigned)item.tag);
1326 goto err;
1327 }
1328
1329 if (start == end) {
1330 if (parser->collection_stack_ptr) {
1331 hid_err(device, "unbalanced collection at end of report description\n");
1332 goto err;
1333 }
1334 if (parser->local.delimiter_depth) {
1335 hid_err(device, "unbalanced delimiter at end of report description\n");
1336 goto err;
1337 }
1338
1339 /*
1340 * fetch initial values in case the device's
1341 * default multiplier isn't the recommended 1
1342 */
1343 hid_setup_resolution_multiplier(device);
1344
1345 kfree(parser->collection_stack);
1346 vfree(parser);
1347 device->status |= HID_STAT_PARSED;
1348
1349 return 0;
1350 }
1351 }
1352
1353 hid_err(device, "item fetching failed at offset %u/%u\n",
1354 size - (unsigned int)(end - start), size);
1355 err:
1356 kfree(parser->collection_stack);
1357 alloc_err:
1358 vfree(parser);
1359 hid_close_report(device);
1360 return ret;
1361 }
1362 EXPORT_SYMBOL_GPL(hid_open_report);
1363
1364 /*
1365 * Extract/implement a data field from/to a little endian report (bit array).
1366 *
1367 * Code sort-of follows HID spec:
1368 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1369 *
1370 * While the USB HID spec allows unlimited length bit fields in "report
1371 * descriptors", most devices never use more than 16 bits.
1372 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1373 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1374 */
1375
__extract(u8 * report,unsigned offset,int n)1376 static u32 __extract(u8 *report, unsigned offset, int n)
1377 {
1378 unsigned int idx = offset / 8;
1379 unsigned int bit_nr = 0;
1380 unsigned int bit_shift = offset % 8;
1381 int bits_to_copy = 8 - bit_shift;
1382 u32 value = 0;
1383 u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1384
1385 while (n > 0) {
1386 value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1387 n -= bits_to_copy;
1388 bit_nr += bits_to_copy;
1389 bits_to_copy = 8;
1390 bit_shift = 0;
1391 idx++;
1392 }
1393
1394 return value & mask;
1395 }
1396
hid_field_extract(const struct hid_device * hid,u8 * report,unsigned offset,unsigned n)1397 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1398 unsigned offset, unsigned n)
1399 {
1400 if (n > 32) {
1401 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1402 __func__, n, current->comm);
1403 n = 32;
1404 }
1405
1406 return __extract(report, offset, n);
1407 }
1408 EXPORT_SYMBOL_GPL(hid_field_extract);
1409
1410 /*
1411 * "implement" : set bits in a little endian bit stream.
1412 * Same concepts as "extract" (see comments above).
1413 * The data mangled in the bit stream remains in little endian
1414 * order the whole time. It make more sense to talk about
1415 * endianness of register values by considering a register
1416 * a "cached" copy of the little endian bit stream.
1417 */
1418
__implement(u8 * report,unsigned offset,int n,u32 value)1419 static void __implement(u8 *report, unsigned offset, int n, u32 value)
1420 {
1421 unsigned int idx = offset / 8;
1422 unsigned int bit_shift = offset % 8;
1423 int bits_to_set = 8 - bit_shift;
1424
1425 while (n - bits_to_set >= 0) {
1426 report[idx] &= ~(0xff << bit_shift);
1427 report[idx] |= value << bit_shift;
1428 value >>= bits_to_set;
1429 n -= bits_to_set;
1430 bits_to_set = 8;
1431 bit_shift = 0;
1432 idx++;
1433 }
1434
1435 /* last nibble */
1436 if (n) {
1437 u8 bit_mask = ((1U << n) - 1);
1438 report[idx] &= ~(bit_mask << bit_shift);
1439 report[idx] |= value << bit_shift;
1440 }
1441 }
1442
implement(const struct hid_device * hid,u8 * report,unsigned offset,unsigned n,u32 value)1443 static void implement(const struct hid_device *hid, u8 *report,
1444 unsigned offset, unsigned n, u32 value)
1445 {
1446 if (unlikely(n > 32)) {
1447 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1448 __func__, n, current->comm);
1449 n = 32;
1450 } else if (n < 32) {
1451 u32 m = (1U << n) - 1;
1452
1453 if (unlikely(value > m)) {
1454 hid_warn(hid,
1455 "%s() called with too large value %d (n: %d)! (%s)\n",
1456 __func__, value, n, current->comm);
1457 value &= m;
1458 }
1459 }
1460
1461 __implement(report, offset, n, value);
1462 }
1463
1464 /*
1465 * Search an array for a value.
1466 */
1467
search(__s32 * array,__s32 value,unsigned n)1468 static int search(__s32 *array, __s32 value, unsigned n)
1469 {
1470 while (n--) {
1471 if (*array++ == value)
1472 return 0;
1473 }
1474 return -1;
1475 }
1476
1477 /**
1478 * hid_match_report - check if driver's raw_event should be called
1479 *
1480 * @hid: hid device
1481 * @report: hid report to match against
1482 *
1483 * compare hid->driver->report_table->report_type to report->type
1484 */
hid_match_report(struct hid_device * hid,struct hid_report * report)1485 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1486 {
1487 const struct hid_report_id *id = hid->driver->report_table;
1488
1489 if (!id) /* NULL means all */
1490 return 1;
1491
1492 for (; id->report_type != HID_TERMINATOR; id++)
1493 if (id->report_type == HID_ANY_ID ||
1494 id->report_type == report->type)
1495 return 1;
1496 return 0;
1497 }
1498
1499 /**
1500 * hid_match_usage - check if driver's event should be called
1501 *
1502 * @hid: hid device
1503 * @usage: usage to match against
1504 *
1505 * compare hid->driver->usage_table->usage_{type,code} to
1506 * usage->usage_{type,code}
1507 */
hid_match_usage(struct hid_device * hid,struct hid_usage * usage)1508 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1509 {
1510 const struct hid_usage_id *id = hid->driver->usage_table;
1511
1512 if (!id) /* NULL means all */
1513 return 1;
1514
1515 for (; id->usage_type != HID_ANY_ID - 1; id++)
1516 if ((id->usage_hid == HID_ANY_ID ||
1517 id->usage_hid == usage->hid) &&
1518 (id->usage_type == HID_ANY_ID ||
1519 id->usage_type == usage->type) &&
1520 (id->usage_code == HID_ANY_ID ||
1521 id->usage_code == usage->code))
1522 return 1;
1523 return 0;
1524 }
1525
hid_process_event(struct hid_device * hid,struct hid_field * field,struct hid_usage * usage,__s32 value,int interrupt)1526 static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1527 struct hid_usage *usage, __s32 value, int interrupt)
1528 {
1529 struct hid_driver *hdrv = hid->driver;
1530 int ret;
1531
1532 if (!list_empty(&hid->debug_list))
1533 hid_dump_input(hid, usage, value);
1534
1535 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1536 ret = hdrv->event(hid, field, usage, value);
1537 if (ret != 0) {
1538 if (ret < 0)
1539 hid_err(hid, "%s's event failed with %d\n",
1540 hdrv->name, ret);
1541 return;
1542 }
1543 }
1544
1545 if (hid->claimed & HID_CLAIMED_INPUT)
1546 hidinput_hid_event(hid, field, usage, value);
1547 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1548 hid->hiddev_hid_event(hid, field, usage, value);
1549 }
1550
1551 /*
1552 * Checks if the given value is valid within this field
1553 */
hid_array_value_is_valid(struct hid_field * field,__s32 value)1554 static inline int hid_array_value_is_valid(struct hid_field *field,
1555 __s32 value)
1556 {
1557 __s32 min = field->logical_minimum;
1558
1559 /*
1560 * Value needs to be between logical min and max, and
1561 * (value - min) is used as an index in the usage array.
1562 * This array is of size field->maxusage
1563 */
1564 return value >= min &&
1565 value <= field->logical_maximum &&
1566 value - min < field->maxusage;
1567 }
1568
1569 /*
1570 * Fetch the field from the data. The field content is stored for next
1571 * report processing (we do differential reporting to the layer).
1572 */
hid_input_fetch_field(struct hid_device * hid,struct hid_field * field,__u8 * data)1573 static void hid_input_fetch_field(struct hid_device *hid,
1574 struct hid_field *field,
1575 __u8 *data)
1576 {
1577 unsigned n;
1578 unsigned count = field->report_count;
1579 unsigned offset = field->report_offset;
1580 unsigned size = field->report_size;
1581 __s32 min = field->logical_minimum;
1582 __s32 *value;
1583
1584 value = field->new_value;
1585 memset(value, 0, count * sizeof(__s32));
1586 field->ignored = false;
1587
1588 for (n = 0; n < count; n++) {
1589
1590 value[n] = min < 0 ?
1591 snto32(hid_field_extract(hid, data, offset + n * size,
1592 size), size) :
1593 hid_field_extract(hid, data, offset + n * size, size);
1594
1595 /* Ignore report if ErrorRollOver */
1596 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1597 hid_array_value_is_valid(field, value[n]) &&
1598 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1599 field->ignored = true;
1600 return;
1601 }
1602 }
1603 }
1604
1605 /*
1606 * Process a received variable field.
1607 */
1608
hid_input_var_field(struct hid_device * hid,struct hid_field * field,int interrupt)1609 static void hid_input_var_field(struct hid_device *hid,
1610 struct hid_field *field,
1611 int interrupt)
1612 {
1613 unsigned int count = field->report_count;
1614 __s32 *value = field->new_value;
1615 unsigned int n;
1616
1617 for (n = 0; n < count; n++)
1618 hid_process_event(hid,
1619 field,
1620 &field->usage[n],
1621 value[n],
1622 interrupt);
1623
1624 memcpy(field->value, value, count * sizeof(__s32));
1625 }
1626
1627 /*
1628 * Process a received array field. The field content is stored for
1629 * next report processing (we do differential reporting to the layer).
1630 */
1631
hid_input_array_field(struct hid_device * hid,struct hid_field * field,int interrupt)1632 static void hid_input_array_field(struct hid_device *hid,
1633 struct hid_field *field,
1634 int interrupt)
1635 {
1636 unsigned int n;
1637 unsigned int count = field->report_count;
1638 __s32 min = field->logical_minimum;
1639 __s32 *value;
1640
1641 value = field->new_value;
1642
1643 /* ErrorRollOver */
1644 if (field->ignored)
1645 return;
1646
1647 for (n = 0; n < count; n++) {
1648 if (hid_array_value_is_valid(field, field->value[n]) &&
1649 search(value, field->value[n], count))
1650 hid_process_event(hid,
1651 field,
1652 &field->usage[field->value[n] - min],
1653 0,
1654 interrupt);
1655
1656 if (hid_array_value_is_valid(field, value[n]) &&
1657 search(field->value, value[n], count))
1658 hid_process_event(hid,
1659 field,
1660 &field->usage[value[n] - min],
1661 1,
1662 interrupt);
1663 }
1664
1665 memcpy(field->value, value, count * sizeof(__s32));
1666 }
1667
1668 /*
1669 * Analyse a received report, and fetch the data from it. The field
1670 * content is stored for next report processing (we do differential
1671 * reporting to the layer).
1672 */
hid_process_report(struct hid_device * hid,struct hid_report * report,__u8 * data,int interrupt)1673 static void hid_process_report(struct hid_device *hid,
1674 struct hid_report *report,
1675 __u8 *data,
1676 int interrupt)
1677 {
1678 unsigned int a;
1679 struct hid_field_entry *entry;
1680 struct hid_field *field;
1681
1682 /* first retrieve all incoming values in data */
1683 for (a = 0; a < report->maxfield; a++)
1684 hid_input_fetch_field(hid, report->field[a], data);
1685
1686 if (!list_empty(&report->field_entry_list)) {
1687 /* INPUT_REPORT, we have a priority list of fields */
1688 list_for_each_entry(entry,
1689 &report->field_entry_list,
1690 list) {
1691 field = entry->field;
1692
1693 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1694 hid_process_event(hid,
1695 field,
1696 &field->usage[entry->index],
1697 field->new_value[entry->index],
1698 interrupt);
1699 else
1700 hid_input_array_field(hid, field, interrupt);
1701 }
1702
1703 /* we need to do the memcpy at the end for var items */
1704 for (a = 0; a < report->maxfield; a++) {
1705 field = report->field[a];
1706
1707 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1708 memcpy(field->value, field->new_value,
1709 field->report_count * sizeof(__s32));
1710 }
1711 } else {
1712 /* FEATURE_REPORT, regular processing */
1713 for (a = 0; a < report->maxfield; a++) {
1714 field = report->field[a];
1715
1716 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1717 hid_input_var_field(hid, field, interrupt);
1718 else
1719 hid_input_array_field(hid, field, interrupt);
1720 }
1721 }
1722 }
1723
1724 /*
1725 * Insert a given usage_index in a field in the list
1726 * of processed usages in the report.
1727 *
1728 * The elements of lower priority score are processed
1729 * first.
1730 */
__hid_insert_field_entry(struct hid_device * hid,struct hid_report * report,struct hid_field_entry * entry,struct hid_field * field,unsigned int usage_index)1731 static void __hid_insert_field_entry(struct hid_device *hid,
1732 struct hid_report *report,
1733 struct hid_field_entry *entry,
1734 struct hid_field *field,
1735 unsigned int usage_index)
1736 {
1737 struct hid_field_entry *next;
1738
1739 entry->field = field;
1740 entry->index = usage_index;
1741 entry->priority = field->usages_priorities[usage_index];
1742
1743 /* insert the element at the correct position */
1744 list_for_each_entry(next,
1745 &report->field_entry_list,
1746 list) {
1747 /*
1748 * the priority of our element is strictly higher
1749 * than the next one, insert it before
1750 */
1751 if (entry->priority > next->priority) {
1752 list_add_tail(&entry->list, &next->list);
1753 return;
1754 }
1755 }
1756
1757 /* lowest priority score: insert at the end */
1758 list_add_tail(&entry->list, &report->field_entry_list);
1759 }
1760
hid_report_process_ordering(struct hid_device * hid,struct hid_report * report)1761 static void hid_report_process_ordering(struct hid_device *hid,
1762 struct hid_report *report)
1763 {
1764 struct hid_field *field;
1765 struct hid_field_entry *entries;
1766 unsigned int a, u, usages;
1767 unsigned int count = 0;
1768
1769 /* count the number of individual fields in the report */
1770 for (a = 0; a < report->maxfield; a++) {
1771 field = report->field[a];
1772
1773 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1774 count += field->report_count;
1775 else
1776 count++;
1777 }
1778
1779 /* allocate the memory to process the fields */
1780 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1781 if (!entries)
1782 return;
1783
1784 report->field_entries = entries;
1785
1786 /*
1787 * walk through all fields in the report and
1788 * store them by priority order in report->field_entry_list
1789 *
1790 * - Var elements are individualized (field + usage_index)
1791 * - Arrays are taken as one, we can not chose an order for them
1792 */
1793 usages = 0;
1794 for (a = 0; a < report->maxfield; a++) {
1795 field = report->field[a];
1796
1797 if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1798 for (u = 0; u < field->report_count; u++) {
1799 __hid_insert_field_entry(hid, report,
1800 &entries[usages],
1801 field, u);
1802 usages++;
1803 }
1804 } else {
1805 __hid_insert_field_entry(hid, report, &entries[usages],
1806 field, 0);
1807 usages++;
1808 }
1809 }
1810 }
1811
hid_process_ordering(struct hid_device * hid)1812 static void hid_process_ordering(struct hid_device *hid)
1813 {
1814 struct hid_report *report;
1815 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1816
1817 list_for_each_entry(report, &report_enum->report_list, list)
1818 hid_report_process_ordering(hid, report);
1819 }
1820
1821 /*
1822 * Output the field into the report.
1823 */
1824
hid_output_field(const struct hid_device * hid,struct hid_field * field,__u8 * data)1825 static void hid_output_field(const struct hid_device *hid,
1826 struct hid_field *field, __u8 *data)
1827 {
1828 unsigned count = field->report_count;
1829 unsigned offset = field->report_offset;
1830 unsigned size = field->report_size;
1831 unsigned n;
1832
1833 for (n = 0; n < count; n++) {
1834 if (field->logical_minimum < 0) /* signed values */
1835 implement(hid, data, offset + n * size, size,
1836 s32ton(field->value[n], size));
1837 else /* unsigned values */
1838 implement(hid, data, offset + n * size, size,
1839 field->value[n]);
1840 }
1841 }
1842
1843 /*
1844 * Compute the size of a report.
1845 */
hid_compute_report_size(struct hid_report * report)1846 static size_t hid_compute_report_size(struct hid_report *report)
1847 {
1848 if (report->size)
1849 return ((report->size - 1) >> 3) + 1;
1850
1851 return 0;
1852 }
1853
1854 /*
1855 * Create a report. 'data' has to be allocated using
1856 * hid_alloc_report_buf() so that it has proper size.
1857 */
1858
hid_output_report(struct hid_report * report,__u8 * data)1859 void hid_output_report(struct hid_report *report, __u8 *data)
1860 {
1861 unsigned n;
1862
1863 if (report->id > 0)
1864 *data++ = report->id;
1865
1866 memset(data, 0, hid_compute_report_size(report));
1867 for (n = 0; n < report->maxfield; n++)
1868 hid_output_field(report->device, report->field[n], data);
1869 }
1870 EXPORT_SYMBOL_GPL(hid_output_report);
1871
1872 /*
1873 * Allocator for buffer that is going to be passed to hid_output_report()
1874 */
1875 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1876 {
1877 /*
1878 * 7 extra bytes are necessary to achieve proper functionality
1879 * of implement() working on 8 byte chunks
1880 */
1881
1882 u32 len = hid_report_len(report) + 7;
1883
1884 return kzalloc(len, flags);
1885 }
1886 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
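
/*
 * Illustrative sketch (not lifted from an in-tree driver): build the
 * report image with hid_output_report() into a buffer obtained from
 * hid_alloc_report_buf() and hand it to the transport. The surrounding
 * error handling and the use of hid_hw_output_report() are assumptions
 * made for the example.
 *
 *	u8 *buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	hid_output_report(report, buf);
 *	ret = hid_hw_output_report(hdev, buf, hid_report_len(report));
 *	kfree(buf);
 */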
1887
1888 /*
1889 * Set a field value. The report this field belongs to has to be
1890 * created and transferred to the device for the new value to take
1891 * effect.
1892 */
1893
1894 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1895 {
1896 unsigned size;
1897
1898 if (!field)
1899 return -1;
1900
1901 size = field->report_size;
1902
1903 hid_dump_input(field->report->device, field->usage + offset, value);
1904
1905 if (offset >= field->report_count) {
1906 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1907 offset, field->report_count);
1908 return -1;
1909 }
1910 if (field->logical_minimum < 0) {
1911 if (value != snto32(s32ton(value, size), size)) {
1912 hid_err(field->report->device, "value %d is out of range\n", value);
1913 return -1;
1914 }
1915 }
1916 field->value[offset] = value;
1917 return 0;
1918 }
1919 EXPORT_SYMBOL_GPL(hid_set_field);
1920
1921 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1922 unsigned int application, unsigned int usage)
1923 {
1924 struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1925 struct hid_report *report;
1926 int i, j;
1927
1928 list_for_each_entry(report, report_list, list) {
1929 if (report->application != application)
1930 continue;
1931
1932 for (i = 0; i < report->maxfield; i++) {
1933 struct hid_field *field = report->field[i];
1934
1935 for (j = 0; j < field->maxusage; j++) {
1936 if (field->usage[j].hid == usage)
1937 return field;
1938 }
1939 }
1940 }
1941
1942 return NULL;
1943 }
1944 EXPORT_SYMBOL_GPL(hid_find_field);
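
/*
 * Hedged example: look up a field by application/usage, update its
 * value and queue a Set_Report through hid_hw_request() (defined
 * below). The usage constants are placeholders; a real driver would
 * use whatever its device actually exposes.
 *
 *	struct hid_field *field;
 *
 *	field = hid_find_field(hdev, HID_OUTPUT_REPORT,
 *			       HID_GD_KEYBOARD, HID_UP_LED | 0x01);
 *	if (field && !hid_set_field(field, 0, 1))
 *		hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT);
 */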
1945
1946 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1947 const u8 *data)
1948 {
1949 struct hid_report *report;
1950 unsigned int n = 0; /* Normally report number is 0 */
1951
1952 /* Device uses numbered reports, data[0] is report number */
1953 if (report_enum->numbered)
1954 n = *data;
1955
1956 report = report_enum->report_id_hash[n];
1957 if (report == NULL)
1958 dbg_hid("undefined report_id %u received\n", n);
1959
1960 return report;
1961 }
1962
1963 /*
1964 * Implement a generic .request() callback, using .raw_request()
1965 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1966 */
1967 int __hid_request(struct hid_device *hid, struct hid_report *report,
1968 enum hid_class_request reqtype)
1969 {
1970 char *buf;
1971 int ret;
1972 u32 len;
1973
1974 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1975 if (!buf)
1976 return -ENOMEM;
1977
1978 len = hid_report_len(report);
1979
1980 if (reqtype == HID_REQ_SET_REPORT)
1981 hid_output_report(report, buf);
1982
1983 ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
1984 report->type, reqtype);
1985 if (ret < 0) {
1986 dbg_hid("unable to complete request: %d\n", ret);
1987 goto out;
1988 }
1989
1990 if (reqtype == HID_REQ_GET_REPORT)
1991 hid_input_report(hid, report->type, buf, ret, 0);
1992
1993 ret = 0;
1994
1995 out:
1996 kfree(buf);
1997 return ret;
1998 }
1999 EXPORT_SYMBOL_GPL(__hid_request);
2000
2001 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2002 int interrupt)
2003 {
2004 struct hid_report_enum *report_enum = hid->report_enum + type;
2005 struct hid_report *report;
2006 struct hid_driver *hdrv;
2007 int max_buffer_size = HID_MAX_BUFFER_SIZE;
2008 u32 rsize, csize = size;
2009 u8 *cdata = data;
2010 int ret = 0;
2011
2012 report = hid_get_report(report_enum, data);
2013 if (!report)
2014 goto out;
2015
2016 if (report_enum->numbered) {
2017 cdata++;
2018 csize--;
2019 }
2020
2021 rsize = hid_compute_report_size(report);
2022
2023 if (hid->ll_driver->max_buffer_size)
2024 max_buffer_size = hid->ll_driver->max_buffer_size;
2025
2026 if (report_enum->numbered && rsize >= max_buffer_size)
2027 rsize = max_buffer_size - 1;
2028 else if (rsize > max_buffer_size)
2029 rsize = max_buffer_size;
2030
2031 if (csize < rsize) {
2032 dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2033 csize, rsize);
2034 memset(cdata + csize, 0, rsize - csize);
2035 }
2036
2037 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2038 hid->hiddev_report_event(hid, report);
2039 if (hid->claimed & HID_CLAIMED_HIDRAW) {
2040 ret = hidraw_report_event(hid, data, size);
2041 if (ret)
2042 goto out;
2043 }
2044
2045 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2046 hid_process_report(hid, report, cdata, interrupt);
2047 hdrv = hid->driver;
2048 if (hdrv && hdrv->report)
2049 hdrv->report(hid, report);
2050 }
2051
2052 if (hid->claimed & HID_CLAIMED_INPUT)
2053 hidinput_report_event(hid, report);
2054 out:
2055 return ret;
2056 }
2057 EXPORT_SYMBOL_GPL(hid_report_raw_event);
2058
2059
2060 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2061 u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2062 bool lock_already_taken)
2063 {
2064 struct hid_report_enum *report_enum;
2065 struct hid_driver *hdrv;
2066 struct hid_report *report;
2067 int ret = 0;
2068
2069 if (!hid)
2070 return -ENODEV;
2071
2072 ret = down_trylock(&hid->driver_input_lock);
2073 if (lock_already_taken && !ret) {
2074 up(&hid->driver_input_lock);
2075 return -EINVAL;
2076 } else if (!lock_already_taken && ret) {
2077 return -EBUSY;
2078 }
2079
2080 if (!hid->driver) {
2081 ret = -ENODEV;
2082 goto unlock;
2083 }
2084 report_enum = hid->report_enum + type;
2085 hdrv = hid->driver;
2086
2087 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2088 if (IS_ERR(data)) {
2089 ret = PTR_ERR(data);
2090 goto unlock;
2091 }
2092
2093 if (!size) {
2094 dbg_hid("empty report\n");
2095 ret = -1;
2096 goto unlock;
2097 }
2098
2099 /* Avoid unnecessary overhead if debugfs is disabled */
2100 if (!list_empty(&hid->debug_list))
2101 hid_dump_report(hid, type, data, size);
2102
2103 report = hid_get_report(report_enum, data);
2104
2105 if (!report) {
2106 ret = -1;
2107 goto unlock;
2108 }
2109
2110 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2111 ret = hdrv->raw_event(hid, report, data, size);
2112 if (ret < 0)
2113 goto unlock;
2114 }
2115
2116 ret = hid_report_raw_event(hid, type, data, size, interrupt);
2117
2118 unlock:
2119 if (!lock_already_taken)
2120 up(&hid->driver_input_lock);
2121 return ret;
2122 }
2123
2124 /**
2125 * hid_input_report - report data from lower layer (usb, bt...)
2126 *
2127 * @hid: hid device
2128 * @type: HID report type (HID_*_REPORT)
2129 * @data: report contents
2130 * @size: size of data parameter
2131 * @interrupt: distinguish between interrupt and control transfers
2132 *
2133 * This is the data entry point for lower layers.
2134 */
2135 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2136 int interrupt)
2137 {
2138 return __hid_input_report(hid, type, data, size, interrupt, 0,
2139 false, /* from_bpf */
2140 false /* lock_already_taken */);
2141 }
2142 EXPORT_SYMBOL_GPL(hid_input_report);
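
/*
 * Sketch of the lower-layer side, with made-up names: a transport
 * driver typically calls hid_input_report() from its interrupt/URB
 * completion handler with the raw bytes it just received.
 *
 *	static void my_transport_irq(struct my_transport *t, u8 *data, int len)
 *	{
 *		hid_input_report(t->hid, HID_INPUT_REPORT, data, len, 1);
 *	}
 */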
2143
2144 bool hid_match_one_id(const struct hid_device *hdev,
2145 const struct hid_device_id *id)
2146 {
2147 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2148 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2149 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2150 (id->product == HID_ANY_ID || id->product == hdev->product);
2151 }
2152
2153 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2154 const struct hid_device_id *id)
2155 {
2156 for (; id->bus; id++)
2157 if (hid_match_one_id(hdev, id))
2158 return id;
2159
2160 return NULL;
2161 }
2162 EXPORT_SYMBOL_GPL(hid_match_id);
2163
2164 static const struct hid_device_id hid_hiddev_list[] = {
2165 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2166 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2167 { }
2168 };
2169
2170 static bool hid_hiddev(struct hid_device *hdev)
2171 {
2172 return !!hid_match_id(hdev, hid_hiddev_list);
2173 }
2174
2175
2176 static ssize_t
2177 read_report_descriptor(struct file *filp, struct kobject *kobj,
2178 struct bin_attribute *attr,
2179 char *buf, loff_t off, size_t count)
2180 {
2181 struct device *dev = kobj_to_dev(kobj);
2182 struct hid_device *hdev = to_hid_device(dev);
2183
2184 if (off >= hdev->rsize)
2185 return 0;
2186
2187 if (off + count > hdev->rsize)
2188 count = hdev->rsize - off;
2189
2190 memcpy(buf, hdev->rdesc + off, count);
2191
2192 return count;
2193 }
2194
2195 static ssize_t
2196 show_country(struct device *dev, struct device_attribute *attr,
2197 char *buf)
2198 {
2199 struct hid_device *hdev = to_hid_device(dev);
2200
2201 return sprintf(buf, "%02x\n", hdev->country & 0xff);
2202 }
2203
2204 static struct bin_attribute dev_bin_attr_report_desc = {
2205 .attr = { .name = "report_descriptor", .mode = 0444 },
2206 .read = read_report_descriptor,
2207 .size = HID_MAX_DESCRIPTOR_SIZE,
2208 };
2209
2210 static const struct device_attribute dev_attr_country = {
2211 .attr = { .name = "country", .mode = 0444 },
2212 .show = show_country,
2213 };
2214
2215 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2216 {
2217 static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2218 "Joystick", "Gamepad", "Keyboard", "Keypad",
2219 "Multi-Axis Controller"
2220 };
2221 const char *type, *bus;
2222 char buf[64] = "";
2223 unsigned int i;
2224 int len;
2225 int ret;
2226
2227 ret = hid_bpf_connect_device(hdev);
2228 if (ret)
2229 return ret;
2230
2231 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2232 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2233 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2234 connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2235 if (hdev->bus != BUS_USB)
2236 connect_mask &= ~HID_CONNECT_HIDDEV;
2237 if (hid_hiddev(hdev))
2238 connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2239
2240 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2241 connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2242 hdev->claimed |= HID_CLAIMED_INPUT;
2243
2244 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2245 !hdev->hiddev_connect(hdev,
2246 connect_mask & HID_CONNECT_HIDDEV_FORCE))
2247 hdev->claimed |= HID_CLAIMED_HIDDEV;
2248 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2249 hdev->claimed |= HID_CLAIMED_HIDRAW;
2250
2251 if (connect_mask & HID_CONNECT_DRIVER)
2252 hdev->claimed |= HID_CLAIMED_DRIVER;
2253
2254 /* Drivers with the ->raw_event callback set are not required to connect
2255 * to any other listener. */
2256 if (!hdev->claimed && !hdev->driver->raw_event) {
2257 hid_err(hdev, "device has no listeners, quitting\n");
2258 return -ENODEV;
2259 }
2260
2261 hid_process_ordering(hdev);
2262
2263 if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2264 (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2265 hdev->ff_init(hdev);
2266
2267 len = 0;
2268 if (hdev->claimed & HID_CLAIMED_INPUT)
2269 len += sprintf(buf + len, "input");
2270 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2271 len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2272 ((struct hiddev *)hdev->hiddev)->minor);
2273 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2274 len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2275 ((struct hidraw *)hdev->hidraw)->minor);
2276
2277 type = "Device";
2278 for (i = 0; i < hdev->maxcollection; i++) {
2279 struct hid_collection *col = &hdev->collection[i];
2280 if (col->type == HID_COLLECTION_APPLICATION &&
2281 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2282 (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2283 type = types[col->usage & 0xffff];
2284 break;
2285 }
2286 }
2287
2288 switch (hdev->bus) {
2289 case BUS_USB:
2290 bus = "USB";
2291 break;
2292 case BUS_BLUETOOTH:
2293 bus = "BLUETOOTH";
2294 break;
2295 case BUS_I2C:
2296 bus = "I2C";
2297 break;
2298 case BUS_VIRTUAL:
2299 bus = "VIRTUAL";
2300 break;
2301 case BUS_INTEL_ISHTP:
2302 case BUS_AMD_SFH:
2303 bus = "SENSOR HUB";
2304 break;
2305 default:
2306 bus = "<UNKNOWN>";
2307 }
2308
2309 ret = device_create_file(&hdev->dev, &dev_attr_country);
2310 if (ret)
2311 hid_warn(hdev,
2312 "can't create sysfs country code attribute err: %d\n", ret);
2313
2314 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2315 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2316 type, hdev->name, hdev->phys);
2317
2318 return 0;
2319 }
2320 EXPORT_SYMBOL_GPL(hid_connect);
2321
2322 void hid_disconnect(struct hid_device *hdev)
2323 {
2324 device_remove_file(&hdev->dev, &dev_attr_country);
2325 if (hdev->claimed & HID_CLAIMED_INPUT)
2326 hidinput_disconnect(hdev);
2327 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2328 hdev->hiddev_disconnect(hdev);
2329 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2330 hidraw_disconnect(hdev);
2331 hdev->claimed = 0;
2332
2333 hid_bpf_disconnect_device(hdev);
2334 }
2335 EXPORT_SYMBOL_GPL(hid_disconnect);
2336
2337 /**
2338 * hid_hw_start - start underlying HW
2339 * @hdev: hid device
2340 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2341 *
2342 * Call this in probe function *after* hid_parse. This will setup HW
2343 * buffers and start the device (if not deferred to device open).
2344 * hid_hw_stop must be called if this was successful.
2345 */
2346 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2347 {
2348 int error;
2349
2350 error = hdev->ll_driver->start(hdev);
2351 if (error)
2352 return error;
2353
2354 if (connect_mask) {
2355 error = hid_connect(hdev, connect_mask);
2356 if (error) {
2357 hdev->ll_driver->stop(hdev);
2358 return error;
2359 }
2360 }
2361
2362 return 0;
2363 }
2364 EXPORT_SYMBOL_GPL(hid_hw_start);
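
/*
 * Minimal probe() sketch relying on the defaults (hypothetical driver
 * name); this mirrors what the default probe path in
 * __hid_device_probe() does when a driver has no ->probe() of its own.
 *
 *	static int my_probe(struct hid_device *hdev,
 *			    const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 */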
2365
2366 /**
2367 * hid_hw_stop - stop underlying HW
2368 * @hdev: hid device
2369 *
2370 * This is usually called from the remove function, or from probe when
2371 * something failed and hid_hw_start() was already called.
2372 */
2373 void hid_hw_stop(struct hid_device *hdev)
2374 {
2375 hid_disconnect(hdev);
2376 hdev->ll_driver->stop(hdev);
2377 }
2378 EXPORT_SYMBOL_GPL(hid_hw_stop);
2379
2380 /**
2381 * hid_hw_open - signal underlying HW to start delivering events
2382 * @hdev: hid device
2383 *
2384 * Tell underlying HW to start delivering events from the device.
2385 * This function should be called sometime after successful call
2386 * to hid_hw_start().
2387 */
2388 int hid_hw_open(struct hid_device *hdev)
2389 {
2390 int ret;
2391
2392 ret = mutex_lock_killable(&hdev->ll_open_lock);
2393 if (ret)
2394 return ret;
2395
2396 if (!hdev->ll_open_count++) {
2397 ret = hdev->ll_driver->open(hdev);
2398 if (ret)
2399 hdev->ll_open_count--;
2400 }
2401
2402 mutex_unlock(&hdev->ll_open_lock);
2403 return ret;
2404 }
2405 EXPORT_SYMBOL_GPL(hid_hw_open);
2406
2407 /**
2408 * hid_hw_close - signal underlying HW to stop delivering events
2409 *
2410 * @hdev: hid device
2411 *
2412 * This function indicates that we are not interested in the events
2413 * from this device anymore. Delivery of events may or may not stop,
2414 * depending on the number of users still outstanding.
2415 */
2416 void hid_hw_close(struct hid_device *hdev)
2417 {
2418 mutex_lock(&hdev->ll_open_lock);
2419 if (!--hdev->ll_open_count)
2420 hdev->ll_driver->close(hdev);
2421 mutex_unlock(&hdev->ll_open_lock);
2422 }
2423 EXPORT_SYMBOL_GPL(hid_hw_close);
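
/*
 * ll_open_count makes hid_hw_open()/hid_hw_close() reference counted,
 * so paired calls from several listeners are safe. Hedged sketch with
 * invented names, e.g. an input device's open/close handlers:
 *
 *	static int my_input_open(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		return hid_hw_open(hdev);
 *	}
 *
 *	static void my_input_close(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		hid_hw_close(hdev);
 *	}
 */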
2424
2425 /**
2426 * hid_hw_request - send report request to device
2427 *
2428 * @hdev: hid device
2429 * @report: report to send
2430 * @reqtype: hid request type
2431 */
2432 void hid_hw_request(struct hid_device *hdev,
2433 struct hid_report *report, enum hid_class_request reqtype)
2434 {
2435 if (hdev->ll_driver->request)
2436 return hdev->ll_driver->request(hdev, report, reqtype);
2437
2438 __hid_request(hdev, report, reqtype);
2439 }
2440 EXPORT_SYMBOL_GPL(hid_hw_request);
2441
2442 int __hid_hw_raw_request(struct hid_device *hdev,
2443 unsigned char reportnum, __u8 *buf,
2444 size_t len, enum hid_report_type rtype,
2445 enum hid_class_request reqtype,
2446 u64 source, bool from_bpf)
2447 {
2448 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2449 int ret;
2450
2451 if (hdev->ll_driver->max_buffer_size)
2452 max_buffer_size = hdev->ll_driver->max_buffer_size;
2453
2454 if (len < 1 || len > max_buffer_size || !buf)
2455 return -EINVAL;
2456
2457 ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2458 reqtype, source, from_bpf);
2459 if (ret)
2460 return ret;
2461
2462 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2463 rtype, reqtype);
2464 }
2465
2466 /**
2467 * hid_hw_raw_request - send report request to device
2468 *
2469 * @hdev: hid device
2470 * @reportnum: report ID
2471 * @buf: in/out data to transfer
2472 * @len: length of buf
2473 * @rtype: HID report type
2474 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2475 *
2476 * Return: count of data transferred, negative if error
2477 *
2478 * Same behavior as hid_hw_request, but with raw buffers instead.
2479 */
2480 int hid_hw_raw_request(struct hid_device *hdev,
2481 unsigned char reportnum, __u8 *buf,
2482 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2483 {
2484 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2485 }
2486 EXPORT_SYMBOL_GPL(hid_hw_raw_request);
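
/*
 * Hedged example: fetch a feature report. The buffer has to be
 * heap-allocated (some transports may use it for DMA) and buf[0]
 * carries the report number; report ID 0x02 and the 16-byte length
 * are placeholders only.
 *
 *	u8 *buf = kzalloc(16, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	buf[0] = 0x02;
 *	ret = hid_hw_raw_request(hdev, 0x02, buf, 16,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	kfree(buf);
 */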
2487
2488 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2489 bool from_bpf)
2490 {
2491 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2492 int ret;
2493
2494 if (hdev->ll_driver->max_buffer_size)
2495 max_buffer_size = hdev->ll_driver->max_buffer_size;
2496
2497 if (len < 1 || len > max_buffer_size || !buf)
2498 return -EINVAL;
2499
2500 ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2501 if (ret)
2502 return ret;
2503
2504 if (hdev->ll_driver->output_report)
2505 return hdev->ll_driver->output_report(hdev, buf, len);
2506
2507 return -ENOSYS;
2508 }
2509
2510 /**
2511 * hid_hw_output_report - send output report to device
2512 *
2513 * @hdev: hid device
2514 * @buf: raw data to transfer
2515 * @len: length of buf
2516 *
2517 * Return: count of data transferred, negative if error
2518 */
2519 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2520 {
2521 return __hid_hw_output_report(hdev, buf, len, 0, false);
2522 }
2523 EXPORT_SYMBOL_GPL(hid_hw_output_report);
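
/*
 * A common (though not mandated) driver pattern is to try the output
 * endpoint first and fall back to a Set_Report control request when
 * the transport reports -ENOSYS:
 *
 *	ret = hid_hw_output_report(hdev, buf, len);
 *	if (ret == -ENOSYS)
 *		ret = hid_hw_raw_request(hdev, buf[0], buf, len,
 *					 HID_OUTPUT_REPORT,
 *					 HID_REQ_SET_REPORT);
 */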
2524
2525 #ifdef CONFIG_PM
2526 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2527 {
2528 if (hdev->driver && hdev->driver->suspend)
2529 return hdev->driver->suspend(hdev, state);
2530
2531 return 0;
2532 }
2533 EXPORT_SYMBOL_GPL(hid_driver_suspend);
2534
2535 int hid_driver_reset_resume(struct hid_device *hdev)
2536 {
2537 if (hdev->driver && hdev->driver->reset_resume)
2538 return hdev->driver->reset_resume(hdev);
2539
2540 return 0;
2541 }
2542 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2543
2544 int hid_driver_resume(struct hid_device *hdev)
2545 {
2546 if (hdev->driver && hdev->driver->resume)
2547 return hdev->driver->resume(hdev);
2548
2549 return 0;
2550 }
2551 EXPORT_SYMBOL_GPL(hid_driver_resume);
2552 #endif /* CONFIG_PM */
2553
2554 struct hid_dynid {
2555 struct list_head list;
2556 struct hid_device_id id;
2557 };
2558
2559 /**
2560 * new_id_store - add a new HID device ID to this driver and re-probe devices
2561 * @drv: target device driver
2562 * @buf: buffer for scanning device ID data
2563 * @count: input size
2564 *
2565 * Adds a new dynamic hid device ID to this driver,
2566 * and causes the driver to probe for all devices again.
2567 */
2568 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2569 size_t count)
2570 {
2571 struct hid_driver *hdrv = to_hid_driver(drv);
2572 struct hid_dynid *dynid;
2573 __u32 bus, vendor, product;
2574 unsigned long driver_data = 0;
2575 int ret;
2576
2577 ret = sscanf(buf, "%x %x %x %lx",
2578 &bus, &vendor, &product, &driver_data);
2579 if (ret < 3)
2580 return -EINVAL;
2581
2582 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2583 if (!dynid)
2584 return -ENOMEM;
2585
2586 dynid->id.bus = bus;
2587 dynid->id.group = HID_GROUP_ANY;
2588 dynid->id.vendor = vendor;
2589 dynid->id.product = product;
2590 dynid->id.driver_data = driver_data;
2591
2592 spin_lock(&hdrv->dyn_lock);
2593 list_add_tail(&dynid->list, &hdrv->dyn_list);
2594 spin_unlock(&hdrv->dyn_lock);
2595
2596 ret = driver_attach(&hdrv->driver);
2597
2598 return ret ? : count;
2599 }
2600 static DRIVER_ATTR_WO(new_id);
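
/*
 * The accepted format matches the sscanf() above: bus, vendor and
 * product in hex, optionally followed by driver_data, e.g. writing
 * "0003 046D C077 0" to /sys/bus/hid/drivers/<driver>/new_id (the IDs
 * here are purely illustrative).
 */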
2601
2602 static struct attribute *hid_drv_attrs[] = {
2603 &driver_attr_new_id.attr,
2604 NULL,
2605 };
2606 ATTRIBUTE_GROUPS(hid_drv);
2607
2608 static void hid_free_dynids(struct hid_driver *hdrv)
2609 {
2610 struct hid_dynid *dynid, *n;
2611
2612 spin_lock(&hdrv->dyn_lock);
2613 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2614 list_del(&dynid->list);
2615 kfree(dynid);
2616 }
2617 spin_unlock(&hdrv->dyn_lock);
2618 }
2619
2620 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2621 struct hid_driver *hdrv)
2622 {
2623 struct hid_dynid *dynid;
2624
2625 spin_lock(&hdrv->dyn_lock);
2626 list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2627 if (hid_match_one_id(hdev, &dynid->id)) {
2628 spin_unlock(&hdrv->dyn_lock);
2629 return &dynid->id;
2630 }
2631 }
2632 spin_unlock(&hdrv->dyn_lock);
2633
2634 return hid_match_id(hdev, hdrv->id_table);
2635 }
2636 EXPORT_SYMBOL_GPL(hid_match_device);
2637
2638 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2639 {
2640 struct hid_driver *hdrv = to_hid_driver(drv);
2641 struct hid_device *hdev = to_hid_device(dev);
2642
2643 return hid_match_device(hdev, hdrv) != NULL;
2644 }
2645
2646 /**
2647 * hid_compare_device_paths - check if both devices share the same path
2648 * @hdev_a: hid device
2649 * @hdev_b: hid device
2650 * @separator: char to use as separator
2651 *
2652 * Check if two devices share the same path up to the last occurrence of
2653 * the separator char. Both paths must exist (i.e., zero-length paths
2654 * don't match).
2655 */
2656 bool hid_compare_device_paths(struct hid_device *hdev_a,
2657 struct hid_device *hdev_b, char separator)
2658 {
2659 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2660 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2661
2662 if (n1 != n2 || n1 <= 0 || n2 <= 0)
2663 return false;
2664
2665 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2666 }
2667 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2668
2669 static bool hid_check_device_match(struct hid_device *hdev,
2670 struct hid_driver *hdrv,
2671 const struct hid_device_id **id)
2672 {
2673 *id = hid_match_device(hdev, hdrv);
2674 if (!*id)
2675 return false;
2676
2677 if (hdrv->match)
2678 return hdrv->match(hdev, hid_ignore_special_drivers);
2679
2680 /*
2681 * hid-generic implements .match(), so we must be dealing with a
2682 * different HID driver here, and can simply check if
2683 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2684 * are set or not.
2685 */
2686 return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2687 }
2688
2689 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2690 {
2691 const struct hid_device_id *id;
2692 int ret;
2693
2694 if (!hdev->bpf_rsize) {
2695 /* in case a bpf program gets detached, we need to free the old one */
2696 hid_free_bpf_rdesc(hdev);
2697
2698 /* keep this around so we know we called it once */
2699 hdev->bpf_rsize = hdev->dev_rsize;
2700
2701 /* call_hid_bpf_rdesc_fixup will always return a valid pointer */
2702 hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
2703 &hdev->bpf_rsize);
2704 }
2705
2706 if (!hid_check_device_match(hdev, hdrv, &id))
2707 return -ENODEV;
2708
2709 hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2710 if (!hdev->devres_group_id)
2711 return -ENOMEM;
2712
2713 /* reset the quirks that have been previously set */
2714 hdev->quirks = hid_lookup_quirk(hdev);
2715 hdev->driver = hdrv;
2716
2717 if (hdrv->probe) {
2718 ret = hdrv->probe(hdev, id);
2719 } else { /* default probe */
2720 ret = hid_open_report(hdev);
2721 if (!ret)
2722 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2723 }
2724
2725 /*
2726 * Note that we are not closing the devres group opened above so
2727 * even resources that were attached to the device after probe is
2728 * run are released when hid_device_remove() is executed. This is
2729 * needed as some drivers would allocate additional resources,
2730 * for example when updating firmware.
2731 */
2732
2733 if (ret) {
2734 devres_release_group(&hdev->dev, hdev->devres_group_id);
2735 hid_close_report(hdev);
2736 hdev->driver = NULL;
2737 }
2738
2739 return ret;
2740 }
2741
2742 static int hid_device_probe(struct device *dev)
2743 {
2744 struct hid_device *hdev = to_hid_device(dev);
2745 struct hid_driver *hdrv = to_hid_driver(dev->driver);
2746 int ret = 0;
2747
2748 if (down_interruptible(&hdev->driver_input_lock))
2749 return -EINTR;
2750
2751 hdev->io_started = false;
2752 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2753
2754 if (!hdev->driver)
2755 ret = __hid_device_probe(hdev, hdrv);
2756
2757 if (!hdev->io_started)
2758 up(&hdev->driver_input_lock);
2759
2760 return ret;
2761 }
2762
2763 static void hid_device_remove(struct device *dev)
2764 {
2765 struct hid_device *hdev = to_hid_device(dev);
2766 struct hid_driver *hdrv;
2767
2768 down(&hdev->driver_input_lock);
2769 hdev->io_started = false;
2770
2771 hdrv = hdev->driver;
2772 if (hdrv) {
2773 if (hdrv->remove)
2774 hdrv->remove(hdev);
2775 else /* default remove */
2776 hid_hw_stop(hdev);
2777
2778 /* Release all devres resources allocated by the driver */
2779 devres_release_group(&hdev->dev, hdev->devres_group_id);
2780
2781 hid_close_report(hdev);
2782 hdev->driver = NULL;
2783 }
2784
2785 if (!hdev->io_started)
2786 up(&hdev->driver_input_lock);
2787 }
2788
2789 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2790 char *buf)
2791 {
2792 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2793
2794 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2795 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2796 }
2797 static DEVICE_ATTR_RO(modalias);
2798
2799 static struct attribute *hid_dev_attrs[] = {
2800 &dev_attr_modalias.attr,
2801 NULL,
2802 };
2803 static struct bin_attribute *hid_dev_bin_attrs[] = {
2804 &dev_bin_attr_report_desc,
2805 NULL
2806 };
2807 static const struct attribute_group hid_dev_group = {
2808 .attrs = hid_dev_attrs,
2809 .bin_attrs = hid_dev_bin_attrs,
2810 };
2811 __ATTRIBUTE_GROUPS(hid_dev);
2812
2813 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2814 {
2815 const struct hid_device *hdev = to_hid_device(dev);
2816
2817 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2818 hdev->bus, hdev->vendor, hdev->product))
2819 return -ENOMEM;
2820
2821 if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2822 return -ENOMEM;
2823
2824 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2825 return -ENOMEM;
2826
2827 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2828 return -ENOMEM;
2829
2830 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2831 hdev->bus, hdev->group, hdev->vendor, hdev->product))
2832 return -ENOMEM;
2833
2834 return 0;
2835 }
2836
2837 const struct bus_type hid_bus_type = {
2838 .name = "hid",
2839 .dev_groups = hid_dev_groups,
2840 .drv_groups = hid_drv_groups,
2841 .match = hid_bus_match,
2842 .probe = hid_device_probe,
2843 .remove = hid_device_remove,
2844 .uevent = hid_uevent,
2845 };
2846 EXPORT_SYMBOL(hid_bus_type);
2847
2848 int hid_add_device(struct hid_device *hdev)
2849 {
2850 static atomic_t id = ATOMIC_INIT(0);
2851 int ret;
2852
2853 if (WARN_ON(hdev->status & HID_STAT_ADDED))
2854 return -EBUSY;
2855
2856 hdev->quirks = hid_lookup_quirk(hdev);
2857
2858 /* we need to kill them here, otherwise they will stay allocated,
2859 * waiting for a driver that will never come */
2860 if (hid_ignore(hdev))
2861 return -ENODEV;
2862
2863 /*
2864 * Check for the mandatory transport channel.
2865 */
2866 if (!hdev->ll_driver->raw_request) {
2867 hid_err(hdev, "transport driver missing .raw_request()\n");
2868 return -EINVAL;
2869 }
2870
2871 /*
2872 * Read the device report descriptor once and use it as a template
2873 * for the driver-specific modifications.
2874 */
2875 ret = hdev->ll_driver->parse(hdev);
2876 if (ret)
2877 return ret;
2878 if (!hdev->dev_rdesc)
2879 return -ENODEV;
2880
2881 /*
2882 * Scan generic devices for group information
2883 */
2884 if (hid_ignore_special_drivers) {
2885 hdev->group = HID_GROUP_GENERIC;
2886 } else if (!hdev->group &&
2887 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2888 ret = hid_scan_report(hdev);
2889 if (ret)
2890 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2891 }
2892
2893 hdev->id = atomic_inc_return(&id);
2894
2895 /* XXX hack, any other cleaner solution after the driver core
2896 * is converted to allow more than 20 bytes as the device name? */
2897 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2898 hdev->vendor, hdev->product, hdev->id);
2899
2900 hid_debug_register(hdev, dev_name(&hdev->dev));
2901 ret = device_add(&hdev->dev);
2902 if (!ret)
2903 hdev->status |= HID_STAT_ADDED;
2904 else
2905 hid_debug_unregister(hdev);
2906
2907 return ret;
2908 }
2909 EXPORT_SYMBOL_GPL(hid_add_device);
2910
2911 /**
2912 * hid_allocate_device - allocate new hid device descriptor
2913 *
2914 * Allocate and initialize a hid device, so that hid_destroy_device() can
2915 * later be used to free it.
2916 *
2917 * A new hid_device pointer is returned on success, otherwise an
2918 * ERR_PTR-encoded error value.
2919 */
2920 struct hid_device *hid_allocate_device(void)
2921 {
2922 struct hid_device *hdev;
2923 int ret = -ENOMEM;
2924
2925 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2926 if (hdev == NULL)
2927 return ERR_PTR(ret);
2928
2929 device_initialize(&hdev->dev);
2930 hdev->dev.release = hid_device_release;
2931 hdev->dev.bus = &hid_bus_type;
2932 device_enable_async_suspend(&hdev->dev);
2933
2934 hid_close_report(hdev);
2935
2936 init_waitqueue_head(&hdev->debug_wait);
2937 INIT_LIST_HEAD(&hdev->debug_list);
2938 spin_lock_init(&hdev->debug_list_lock);
2939 sema_init(&hdev->driver_input_lock, 1);
2940 mutex_init(&hdev->ll_open_lock);
2941 kref_init(&hdev->ref);
2942
2943 ret = hid_bpf_device_init(hdev);
2944 if (ret)
2945 goto out_err;
2946
2947 return hdev;
2948
2949 out_err:
2950 hid_destroy_device(hdev);
2951 return ERR_PTR(ret);
2952 }
2953 EXPORT_SYMBOL_GPL(hid_allocate_device);
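
/*
 * Transport-driver sketch (the ll_driver, IDs and name below are
 * assumptions for the example): allocate a device, describe it, then
 * register it with the core.
 *
 *	struct hid_device *hdev = hid_allocate_device();
 *
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	hdev->ll_driver = &my_ll_driver;
 *	hdev->bus = BUS_VIRTUAL;
 *	hdev->vendor = 0x1234;
 *	hdev->product = 0x5678;
 *	strscpy(hdev->name, "Example virtual device", sizeof(hdev->name));
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);
 */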
2954
2955 static void hid_remove_device(struct hid_device *hdev)
2956 {
2957 if (hdev->status & HID_STAT_ADDED) {
2958 device_del(&hdev->dev);
2959 hid_debug_unregister(hdev);
2960 hdev->status &= ~HID_STAT_ADDED;
2961 }
2962 hid_free_bpf_rdesc(hdev);
2963 kfree(hdev->dev_rdesc);
2964 hdev->dev_rdesc = NULL;
2965 hdev->dev_rsize = 0;
2966 hdev->bpf_rsize = 0;
2967 }
2968
2969 /**
2970 * hid_destroy_device - free previously allocated device
2971 *
2972 * @hdev: hid device
2973 *
2974 * If you allocate a hid_device through hid_allocate_device(), you must
2975 * always free it with this function.
2976 */
2977 void hid_destroy_device(struct hid_device *hdev)
2978 {
2979 hid_bpf_destroy_device(hdev);
2980 hid_remove_device(hdev);
2981 put_device(&hdev->dev);
2982 }
2983 EXPORT_SYMBOL_GPL(hid_destroy_device);
2984
2985
2986 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2987 {
2988 struct hid_driver *hdrv = data;
2989 struct hid_device *hdev = to_hid_device(dev);
2990
2991 if (hdev->driver == hdrv &&
2992 !hdrv->match(hdev, hid_ignore_special_drivers) &&
2993 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2994 return device_reprobe(dev);
2995
2996 return 0;
2997 }
2998
2999 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3000 {
3001 struct hid_driver *hdrv = to_hid_driver(drv);
3002
3003 if (hdrv->match) {
3004 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3005 __hid_bus_reprobe_drivers);
3006 }
3007
3008 return 0;
3009 }
3010
3011 static int __bus_removed_driver(struct device_driver *drv, void *data)
3012 {
3013 return bus_rescan_devices(&hid_bus_type);
3014 }
3015
3016 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3017 const char *mod_name)
3018 {
3019 int ret;
3020
3021 hdrv->driver.name = hdrv->name;
3022 hdrv->driver.bus = &hid_bus_type;
3023 hdrv->driver.owner = owner;
3024 hdrv->driver.mod_name = mod_name;
3025
3026 INIT_LIST_HEAD(&hdrv->dyn_list);
3027 spin_lock_init(&hdrv->dyn_lock);
3028
3029 ret = driver_register(&hdrv->driver);
3030
3031 if (ret == 0)
3032 bus_for_each_drv(&hid_bus_type, NULL, NULL,
3033 __hid_bus_driver_added);
3034
3035 return ret;
3036 }
3037 EXPORT_SYMBOL_GPL(__hid_register_driver);
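
/*
 * Most drivers do not call this directly; the usual pattern is a
 * static struct hid_driver plus the module_hid_driver() helper.
 * Sketch with made-up IDs:
 *
 *	static const struct hid_device_id my_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, my_devices);
 *
 *	static struct hid_driver my_driver = {
 *		.name = "my-hid-driver",
 *		.id_table = my_devices,
 *	};
 *	module_hid_driver(my_driver);
 */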
3038
3039 void hid_unregister_driver(struct hid_driver *hdrv)
3040 {
3041 driver_unregister(&hdrv->driver);
3042 hid_free_dynids(hdrv);
3043
3044 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3045 }
3046 EXPORT_SYMBOL_GPL(hid_unregister_driver);
3047
3048 int hid_check_keys_pressed(struct hid_device *hid)
3049 {
3050 struct hid_input *hidinput;
3051 int i;
3052
3053 if (!(hid->claimed & HID_CLAIMED_INPUT))
3054 return 0;
3055
3056 list_for_each_entry(hidinput, &hid->inputs, list) {
3057 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3058 if (hidinput->input->key[i])
3059 return 1;
3060 }
3061
3062 return 0;
3063 }
3064 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
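
/*
 * One hedged use, seen in transport suspend paths: refuse to
 * autosuspend while a key is held down so that the release event is
 * not lost, e.g.
 *
 *	if (hid_check_keys_pressed(hid))
 *		return -EBUSY;
 */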
3065
3066 #ifdef CONFIG_HID_BPF
3067 static const struct hid_ops __hid_ops = {
3068 .hid_get_report = hid_get_report,
3069 .hid_hw_raw_request = __hid_hw_raw_request,
3070 .hid_hw_output_report = __hid_hw_output_report,
3071 .hid_input_report = __hid_input_report,
3072 .owner = THIS_MODULE,
3073 .bus_type = &hid_bus_type,
3074 };
3075 #endif
3076
3077 static int __init hid_init(void)
3078 {
3079 int ret;
3080
3081 ret = bus_register(&hid_bus_type);
3082 if (ret) {
3083 pr_err("can't register hid bus\n");
3084 goto err;
3085 }
3086
3087 #ifdef CONFIG_HID_BPF
3088 hid_ops = &__hid_ops;
3089 #endif
3090
3091 ret = hidraw_init();
3092 if (ret)
3093 goto err_bus;
3094
3095 hid_debug_init();
3096
3097 return 0;
3098 err_bus:
3099 bus_unregister(&hid_bus_type);
3100 err:
3101 return ret;
3102 }
3103
3104 static void __exit hid_exit(void)
3105 {
3106 #ifdef CONFIG_HID_BPF
3107 hid_ops = NULL;
3108 #endif
3109 hid_debug_exit();
3110 hidraw_exit();
3111 bus_unregister(&hid_bus_type);
3112 hid_quirks_exit(HID_BUS_ANY);
3113 }
3114
3115 module_init(hid_init);
3116 module_exit(hid_exit);
3117
3118 MODULE_AUTHOR("Andreas Gal");
3119 MODULE_AUTHOR("Vojtech Pavlik");
3120 MODULE_AUTHOR("Jiri Kosina");
3121 MODULE_DESCRIPTION("HID support for Linux");
3122 MODULE_LICENSE("GPL");
3123