1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * HID support for Linux
4 *
5 * Copyright (c) 1999 Andreas Gal
6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8 * Copyright (c) 2006-2012 Jiri Kosina
9 */
10
11 /*
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35
36 #include "hid-ids.h"
37
38 /*
39 * Version Information
40 */
41
42 #define DRIVER_DESC "HID core driver"
43
/* When non-zero, bus matching skips device-specific HID drivers so that
 * hid-generic handles every device; tunable at runtime via the module
 * parameter (sysfs mode 0600). */
static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
47
48 /*
49 * Convert a signed n-bit integer to signed 32-bit integer.
50 */
51
snto32(__u32 value,unsigned int n)52 static s32 snto32(__u32 value, unsigned int n)
53 {
54 if (!value || !n)
55 return 0;
56
57 if (n > 32)
58 n = 32;
59
60 return sign_extend32(value, n - 1);
61 }
62
63 /*
64 * Convert a signed 32-bit integer to a signed n-bit integer.
65 */
66
/*
 * Clamp a signed 32-bit value into a signed n-bit field: values that do
 * not fit saturate to the field's minimum or maximum.
 */
static u32 s32ton(__s32 value, unsigned int n)
{
	s32 a;

	if (!value || !n)
		return 0;

	if (n > 32)
		n = 32;

	/* a holds everything above the n-bit field, sign-extended: it is
	 * 0 or -1 exactly when the value fits in n bits. */
	a = value >> (n - 1);
	if (a && a != -1)
		/* saturate to the n-bit minimum (sign bit only) or maximum.
		 * NOTE(review): for n == 32 the shift 1 << (n - 1) touches
		 * the sign bit of a signed int -- relies on kernel build
		 * flags for defined behavior; confirm if reused elsewhere. */
		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
	return value & ((1 << n) - 1);
}
82
83 /*
84 * Register a new report for a device.
85 */
86
/*
 * Look up the report of @type/@id for @device, creating and registering a
 * new one if none exists yet.  Returns NULL for an invalid id or on
 * allocation failure.
 */
struct hid_report *hid_register_report(struct hid_device *device,
		enum hid_report_type type, unsigned int id,
		unsigned int application)
{
	struct hid_report_enum *report_enum = device->report_enum + type;
	struct hid_report *report;

	if (id >= HID_MAX_IDS)
		return NULL;
	/* already registered: return the existing report */
	if (report_enum->report_id_hash[id])
		return report_enum->report_id_hash[id];

	report = kzalloc_obj(struct hid_report);
	if (!report)
		return NULL;

	/* a non-zero id means reports on this device carry a report-id
	 * prefix byte */
	if (id != 0)
		report_enum->numbered = 1;

	report->id = id;
	report->type = type;
	report->size = 0;
	report->device = device;
	report->application = application;
	report_enum->report_id_hash[id] = report;

	list_add_tail(&report->list, &report_enum->report_list);
	INIT_LIST_HEAD(&report->field_entry_list);

	return report;
}
EXPORT_SYMBOL_GPL(hid_register_report);
119
120 /*
121 * Register a new field for this report.
122 */
123
hid_register_field(struct hid_report * report,unsigned usages)124 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
125 {
126 struct hid_field *field;
127
128 if (report->maxfield == HID_MAX_FIELDS) {
129 hid_err(report->device, "too many fields in report\n");
130 return NULL;
131 }
132
133 field = kvzalloc((sizeof(struct hid_field) +
134 usages * sizeof(struct hid_usage) +
135 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
136 if (!field)
137 return NULL;
138
139 field->index = report->maxfield++;
140 report->field[field->index] = field;
141 field->usage = (struct hid_usage *)(field + 1);
142 field->value = (s32 *)(field->usage + usages);
143 field->new_value = (s32 *)(field->value + usages);
144 field->usages_priorities = (s32 *)(field->new_value + usages);
145 field->report = report;
146
147 return field;
148 }
149
150 /*
151 * Open a collection. The type/usage is pushed on the stack.
152 */
153
/*
 * Open a collection of @type: push it on the parser's collection stack and
 * append it to the device's collection array, growing either structure on
 * demand.  Returns 0 on success or -ENOMEM.
 */
static int open_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection;
	unsigned usage;
	int collection_index;

	usage = parser->local.usage[0];

	/* grow the parser's stack in HID_COLLECTION_STACK_SIZE steps;
	 * on krealloc failure the old stack stays valid and is freed by
	 * the caller's cleanup path */
	if (parser->collection_stack_ptr == parser->collection_stack_size) {
		unsigned int *collection_stack;
		unsigned int new_size = parser->collection_stack_size +
					HID_COLLECTION_STACK_SIZE;

		collection_stack = krealloc(parser->collection_stack,
					    new_size * sizeof(unsigned int),
					    GFP_KERNEL);
		if (!collection_stack)
			return -ENOMEM;

		parser->collection_stack = collection_stack;
		parser->collection_stack_size = new_size;
	}

	/* double the device's collection array when it is full;
	 * array3_size() guards the size computation against overflow */
	if (parser->device->maxcollection == parser->device->collection_size) {
		collection = kmalloc(
				array3_size(sizeof(struct hid_collection),
					    parser->device->collection_size,
					    2),
				GFP_KERNEL);
		if (collection == NULL) {
			hid_err(parser->device, "failed to reallocate collection array\n");
			return -ENOMEM;
		}
		memcpy(collection, parser->device->collection,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		memset(collection + parser->device->collection_size, 0,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		kfree(parser->device->collection);
		parser->device->collection = collection;
		parser->device->collection_size *= 2;
	}

	/* record the new collection's index on the stack and fill in its
	 * position within the collection hierarchy */
	parser->collection_stack[parser->collection_stack_ptr++] =
		parser->device->maxcollection;

	collection_index = parser->device->maxcollection++;
	collection = parser->device->collection + collection_index;
	collection->type = type;
	collection->usage = usage;
	collection->level = parser->collection_stack_ptr - 1;
	collection->parent_idx = (collection->level == 0) ? -1 :
		parser->collection_stack[collection->level - 1];

	if (type == HID_COLLECTION_APPLICATION)
		parser->device->maxapplication++;

	return 0;
}
214
215 /*
216 * Close a collection.
217 */
218
close_collection(struct hid_parser * parser)219 static int close_collection(struct hid_parser *parser)
220 {
221 if (!parser->collection_stack_ptr) {
222 hid_err(parser->device, "collection stack underflow\n");
223 return -EINVAL;
224 }
225 parser->collection_stack_ptr--;
226 return 0;
227 }
228
229 /*
230 * Climb up the stack, search for the specified collection type
231 * and return the usage.
232 */
233
hid_lookup_collection(struct hid_parser * parser,unsigned type)234 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
235 {
236 struct hid_collection *collection = parser->device->collection;
237 int n;
238
239 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
240 unsigned index = parser->collection_stack[n];
241 if (collection[index].type == type)
242 return collection[index].usage;
243 }
244 return 0; /* we know nothing about this usage type */
245 }
246
247 /*
248 * Concatenate usage which defines 16 bits or less with the
249 * currently defined usage page to form a 32 bit usage
250 */
251
complete_usage(struct hid_parser * parser,unsigned int index)252 static void complete_usage(struct hid_parser *parser, unsigned int index)
253 {
254 parser->local.usage[index] &= 0xFFFF;
255 parser->local.usage[index] |=
256 (parser->global.usage_page & 0xFFFF) << 16;
257 }
258
259 /*
260 * Add a usage to the temporary parser table.
261 */
262
/*
 * Append @usage (whose encoded item size was @size bytes) to the parser's
 * temporary usage table, tagging it with the innermost open collection.
 * Returns 0 on success, -1 when the table is full.
 */
static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
	unsigned int idx = parser->local.usage_index;

	if (idx >= HID_MAX_USAGES) {
		hid_err(parser->device, "usage index exceeded\n");
		return -1;
	}

	parser->local.usage[idx] = usage;

	/* a short Usage item carries only a usage id; merge in the
	 * currently active usage page to form the full 32-bit usage */
	if (size <= 2)
		complete_usage(parser, idx);

	parser->local.usage_size[idx] = size;
	parser->local.collection_index[idx] = parser->collection_stack_ptr ?
		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
	parser->local.usage_index = idx + 1;

	return 0;
}
285
286 /*
287 * Register a new field for this report.
288 */
289
/*
 * Create a field of @report_type with main-item @flags from the parser's
 * accumulated global/local state and attach it to the matching report.
 * Returns 0 on success (including for padding fields, which register no
 * field), -1 on descriptor errors.
 */
static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
{
	struct hid_report *report;
	struct hid_field *field;
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
	unsigned int usages;
	unsigned int offset;
	unsigned int i;
	unsigned int application;

	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);

	report = hid_register_report(parser->device, report_type,
				     parser->global.report_id, application);
	if (!report) {
		hid_err(parser->device, "hid_register_report failed\n");
		return -1;
	}

	/* Handle both signed and unsigned cases properly: a negative
	 * logical minimum means the range is signed, otherwise both
	 * bounds are compared as unsigned. */
	if ((parser->global.logical_minimum < 0 &&
	     parser->global.logical_maximum <
	     parser->global.logical_minimum) ||
	    (parser->global.logical_minimum >= 0 &&
	     (__u32)parser->global.logical_maximum <
	     (__u32)parser->global.logical_minimum)) {
		dbg_hid("logical range invalid 0x%x 0x%x\n",
			parser->global.logical_minimum,
			parser->global.logical_maximum);
		return -1;
	}

	/* the new field occupies the next report_size * report_count bits */
	offset = report->size;
	report->size += parser->global.report_size * parser->global.report_count;

	/* the transport driver may impose a smaller buffer than the core */
	if (parser->device->ll_driver->max_buffer_size)
		max_buffer_size = parser->device->ll_driver->max_buffer_size;

	/* Total size check: Allow for possible report index byte */
	if (report->size > (max_buffer_size - 1) << 3) {
		hid_err(parser->device, "report is too long\n");
		return -1;
	}

	if (!parser->local.usage_index) /* Ignore padding fields */
		return 0;

	/* allocate enough slots for whichever is larger: declared usages
	 * or the report count */
	usages = max_t(unsigned, parser->local.usage_index,
		       parser->global.report_count);

	field = hid_register_field(report, usages);
	if (!field)
		return 0;

	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
	field->application = application;

	for (i = 0; i < usages; i++) {
		unsigned j = i;
		/* Duplicate the last usage we parsed if we have excess values */
		if (i >= parser->local.usage_index)
			j = parser->local.usage_index - 1;
		field->usage[i].hid = parser->local.usage[j];
		field->usage[i].collection_index =
			parser->local.collection_index[j];
		field->usage[i].usage_index = i;
		field->usage[i].resolution_multiplier = 1;
	}

	/* snapshot the remaining global state into the field */
	field->maxusage = usages;
	field->flags = flags;
	field->report_offset = offset;
	field->report_type = report_type;
	field->report_size = parser->global.report_size;
	field->report_count = parser->global.report_count;
	field->logical_minimum = parser->global.logical_minimum;
	field->logical_maximum = parser->global.logical_maximum;
	field->physical_minimum = parser->global.physical_minimum;
	field->physical_maximum = parser->global.physical_maximum;
	field->unit_exponent = parser->global.unit_exponent;
	field->unit = parser->global.unit;

	return 0;
}
375
376 /*
377 * Read data value from item.
378 */
379
item_udata(struct hid_item * item)380 static u32 item_udata(struct hid_item *item)
381 {
382 switch (item->size) {
383 case 1: return item->data.u8;
384 case 2: return item->data.u16;
385 case 4: return item->data.u32;
386 }
387 return 0;
388 }
389
item_sdata(struct hid_item * item)390 static s32 item_sdata(struct hid_item *item)
391 {
392 switch (item->size) {
393 case 1: return item->data.s8;
394 case 2: return item->data.s16;
395 case 4: return item->data.s32;
396 }
397 return 0;
398 }
399
400 /*
401 * Process a global item.
402 */
403
/*
 * Process a global item: update the parser's global state, which applies
 * to every subsequently declared main item until changed or popped.
 * Returns 0 on success, -1 on malformed or out-of-range items.
 */
static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
	__s32 raw_value;
	switch (item->tag) {
	case HID_GLOBAL_ITEM_TAG_PUSH:
		/* snapshot the whole global state onto a fixed-size stack */
		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
			hid_err(parser->device, "global environment stack overflow\n");
			return -1;
		}

		memcpy(parser->global_stack + parser->global_stack_ptr++,
		       &parser->global, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_POP:

		if (!parser->global_stack_ptr) {
			hid_err(parser->device, "global environment stack underflow\n");
			return -1;
		}

		memcpy(&parser->global, parser->global_stack +
		       --parser->global_stack_ptr, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
		parser->global.usage_page = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
		parser->global.logical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
		/* the maximum is signed only if the minimum was negative */
		if (parser->global.logical_minimum < 0)
			parser->global.logical_maximum = item_sdata(item);
		else
			parser->global.logical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
		parser->global.physical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
		/* same signedness rule as the logical maximum */
		if (parser->global.physical_minimum < 0)
			parser->global.physical_maximum = item_sdata(item);
		else
			parser->global.physical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
		/* Many devices provide unit exponent as a two's complement
		 * nibble due to the common misunderstanding of HID
		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
		 * both this and the standard encoding. */
		raw_value = item_sdata(item);
		if (!(raw_value & 0xfffffff0))
			parser->global.unit_exponent = snto32(raw_value, 4);
		else
			parser->global.unit_exponent = raw_value;
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT:
		parser->global.unit = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
		/* report size is in bits; reject absurd values early */
		parser->global.report_size = item_udata(item);
		if (parser->global.report_size > 256) {
			hid_err(parser->device, "invalid report_size %d\n",
				parser->global.report_size);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
		parser->global.report_count = item_udata(item);
		if (parser->global.report_count > HID_MAX_USAGES) {
			hid_err(parser->device, "invalid report_count %d\n",
				parser->global.report_count);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
		/* report id 0 is reserved for "no report id" */
		parser->global.report_id = item_udata(item);
		if (parser->global.report_id == 0 ||
		    parser->global.report_id >= HID_MAX_IDS) {
			hid_err(parser->device, "report_id %u is invalid\n",
				parser->global.report_id);
			return -1;
		}
		return 0;

	default:
		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
		return -1;
	}
}
505
506 /*
507 * Process a local item.
508 */
509
/*
 * Process a local item: accumulate usages and delimiter state that apply
 * only to the next main item.  Returns 0 on success, -1 on malformed
 * descriptors.
 */
static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	unsigned n;
	__u32 count;

	data = item_udata(item);

	switch (item->tag) {
	case HID_LOCAL_ITEM_TAG_DELIMITER:
		/* non-zero data opens a delimiter set, zero closes it */
		if (data) {
			/*
			 * We treat items before the first delimiter
			 * as global to all usage sets (branch 0).
			 * In the moment we process only these global
			 * items and the first delimiter set.
			 */
			if (parser->local.delimiter_depth != 0) {
				hid_err(parser->device, "nested delimiters\n");
				return -1;
			}
			parser->local.delimiter_depth++;
			parser->local.delimiter_branch++;
		} else {
			if (parser->local.delimiter_depth < 1) {
				hid_err(parser->device, "bogus close delimiter\n");
				return -1;
			}
			parser->local.delimiter_depth--;
		}
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE:
		/* only the first delimiter branch is honored */
		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		return hid_add_usage(parser, data, item->size);

	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		parser->local.usage_minimum = data;
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		/* clamp the min..max range so the usage table cannot
		 * overflow HID_MAX_USAGES */
		count = data - parser->local.usage_minimum;
		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
			/*
			 * We do not warn if the name is not set, we are
			 * actually pre-scanning the device.
			 */
			if (dev_name(&parser->device->dev))
				hid_warn(parser->device,
					 "ignoring exceeding usage max\n");
			data = HID_MAX_USAGES - parser->local.usage_index +
				parser->local.usage_minimum - 1;
			/* NOTE(review): data is __u32, so "data <= 0" only
			 * catches data == 0; a negative clamp result wraps
			 * to a huge value instead -- verify intent. */
			if (data <= 0) {
				hid_err(parser->device,
					"no more usage index available\n");
				return -1;
			}
		}

		for (n = parser->local.usage_minimum; n <= data; n++)
			if (hid_add_usage(parser, n, item->size)) {
				dbg_hid("hid_add_usage failed\n");
				return -1;
			}
		return 0;

	default:
		/* unknown local items are ignored per the HID spec */
		dbg_hid("unknown local item tag 0x%x\n", item->tag);
		return 0;
	}
	return 0;
}
601
602 /*
603 * Concatenate Usage Pages into Usages where relevant:
604 * As per specification, 6.2.2.8: "When the parser encounters a main item it
605 * concatenates the last declared Usage Page with a Usage to form a complete
606 * usage value."
607 */
608
hid_concatenate_last_usage_page(struct hid_parser * parser)609 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
610 {
611 int i;
612 unsigned int usage_page;
613 unsigned int current_page;
614
615 if (!parser->local.usage_index)
616 return;
617
618 usage_page = parser->global.usage_page;
619
620 /*
621 * Concatenate usage page again only if last declared Usage Page
622 * has not been already used in previous usages concatenation
623 */
624 for (i = parser->local.usage_index - 1; i >= 0; i--) {
625 if (parser->local.usage_size[i] > 2)
626 /* Ignore extended usages */
627 continue;
628
629 current_page = parser->local.usage[i] >> 16;
630 if (current_page == usage_page)
631 break;
632
633 complete_usage(parser, i);
634 }
635 }
636
637 /*
638 * Process a main item.
639 */
640
/*
 * Process a main item: finalize pending usages, dispatch on the item tag
 * (collection open/close or field creation), then reset the local state
 * as required after every main item.
 */
static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int ret;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		/* low byte of the data is the collection type */
		ret = open_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		ret = close_collection(parser);
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
		break;
	default:
		/* unknown/reserved tags are logged but not fatal */
		if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
		    item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
			hid_warn_ratelimited(parser->device, "reserved main item tag 0x%x\n", item->tag);
		else
			hid_warn_ratelimited(parser->device, "unknown main item tag 0x%x\n", item->tag);
		ret = 0;
	}

	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */

	return ret;
}
679
680 /*
681 * Process a reserved item.
682 */
683
hid_parser_reserved(struct hid_parser * parser,struct hid_item * item)684 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
685 {
686 dbg_hid("reserved item type, tag 0x%x\n", item->tag);
687 return 0;
688 }
689
690 /*
691 * Free a report and all registered fields. The field->usage and
692 * field->value table's are allocated behind the field, so we need
693 * only to free(field) itself.
694 */
695
hid_free_report(struct hid_report * report)696 static void hid_free_report(struct hid_report *report)
697 {
698 unsigned n;
699
700 kfree(report->field_entries);
701
702 for (n = 0; n < report->maxfield; n++)
703 kvfree(report->field[n]);
704 kfree(report);
705 }
706
707 /*
708 * Close report. This function returns the device
709 * state to the point prior to hid_open_report().
710 */
/*
 * Close report. This function returns the device
 * state to the point prior to hid_open_report(): all reports and
 * collections are freed and the parsed flag is cleared.
 */
static void hid_close_report(struct hid_device *device)
{
	unsigned i, j;

	/* free every registered report of every type and reset the enums */
	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = device->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];
			if (report)
				hid_free_report(report);
		}
		memset(report_enum, 0, sizeof(*report_enum));
		INIT_LIST_HEAD(&report_enum->report_list);
	}

	/*
	 * If the HID driver had a rdesc_fixup() callback, dev->rdesc
	 * will be allocated by hid-core and needs to be freed.
	 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
	 * which cases it'll be freed later on device removal or destroy.
	 */
	if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
		kfree(device->rdesc);
	device->rdesc = NULL;
	device->rsize = 0;

	kfree(device->collection);
	device->collection = NULL;
	device->collection_size = 0;
	device->maxcollection = 0;
	device->maxapplication = 0;

	device->status &= ~HID_STAT_PARSED;
}
746
hid_free_bpf_rdesc(struct hid_device * hdev)747 static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
748 {
749 /* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
750 if (hdev->bpf_rdesc != hdev->dev_rdesc)
751 kfree(hdev->bpf_rdesc);
752 hdev->bpf_rdesc = NULL;
753 }
754
755 /*
756 * Free a device structure, all reports, and all fields.
757 */
758
hiddev_free(struct kref * ref)759 void hiddev_free(struct kref *ref)
760 {
761 struct hid_device *hid = container_of(ref, struct hid_device, ref);
762
763 hid_close_report(hid);
764 hid_free_bpf_rdesc(hid);
765 kfree(hid->dev_rdesc);
766 kfree(hid);
767 }
768
hid_device_release(struct device * dev)769 static void hid_device_release(struct device *dev)
770 {
771 struct hid_device *hid = to_hid_device(dev);
772
773 kref_put(&hid->ref, hiddev_free);
774 }
775
776 /*
777 * Fetch a report description item from the data stream. We support long
778 * items, though they are not used yet.
779 */
780
/*
 * Fetch one report-descriptor item from [start, end) into @item.  Returns
 * the position after the item, or NULL when the stream is exhausted or
 * the item's declared payload would run past @end.  Long items are
 * recognized but carry their payload as an opaque pointer.
 */
static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
{
	u8 b;

	if ((end - start) <= 0)
		return NULL;

	/* prefix byte: bits 2-3 are the type, bits 4-7 the tag */
	b = *start++;

	item->type = (b >> 2) & 3;
	item->tag = (b >> 4) & 15;

	if (item->tag == HID_ITEM_TAG_LONG) {

		item->format = HID_ITEM_FORMAT_LONG;

		/* a long item needs one size byte and one tag byte */
		if ((end - start) < 2)
			return NULL;

		item->size = *start++;
		item->tag = *start++;

		if ((end - start) < item->size)
			return NULL;

		/* payload is referenced in place, not copied */
		item->data.longdata = start;
		start += item->size;
		return start;
	}

	item->format = HID_ITEM_FORMAT_SHORT;
	item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */

	if (end - start < item->size)
		return NULL;

	/* short item payload is little-endian, possibly unaligned */
	switch (item->size) {
	case 0:
		break;

	case 1:
		item->data.u8 = *start;
		break;

	case 2:
		item->data.u16 = get_unaligned_le16(start);
		break;

	case 4:
		item->data.u32 = get_unaligned_le32(start);
		break;
	}

	return start + item->size;
}
836
/* Pre-scan hook for input usages: a Contact ID usage classifies the
 * device into the multitouch group. */
static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
{
	if (usage == HID_DG_CONTACTID)
		parser->device->group = HID_GROUP_MULTITOUCH;
}
844
/*
 * Pre-scan hook for feature usages: vendor usages 0xff0000c5 (256-byte,
 * 8-bit fields) and 0xff0000c6 (single 8-bit field) mark Win8-certified
 * multitouch devices.
 */
static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
{
	const struct hid_global *glob = &parser->global;

	if ((usage == 0xff0000c5 && glob->report_count == 256 &&
	     glob->report_size == 8) ||
	    (usage == 0xff0000c6 && glob->report_count == 1 &&
	     glob->report_size == 8))
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}
855
/*
 * Pre-scan hook for collection opens: derive device group and scan flags
 * from the collection type, current usage page, and pending usages.
 */
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_device *hid = parser->device;
	int i;

	/* sensor-page physical/application collections mark a sensor hub */
	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
	    (type == HID_COLLECTION_PHYSICAL ||
	     type == HID_COLLECTION_APPLICATION))
		hid->group = HID_GROUP_SENSOR_HUB;

	/* quirk: the MS Power Cover must not be handled as multitouch */
	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
	    hid->group == HID_GROUP_MULTITOUCH)
		hid->group = HID_GROUP_GENERIC;

	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] == HID_GD_POINTER)
				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;

	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;

	/* Google vendor usage 0x0001 identifies Vivaldi keyboards */
	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] ==
			    (HID_UP_GOOGLEVENDOR | 0x0001))
				parser->device->group =
					HID_GROUP_VIVALDI;
}
886
/*
 * Pre-scan counterpart of hid_parser_main(): inspect main items only for
 * the usages that influence device-group classification, then reset the
 * local parser state.  Never fails.
 */
static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int i;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		hid_scan_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		/* ignore constant inputs, they will be ignored by hid-input */
		if (data & HID_MAIN_ITEM_CONSTANT)
			break;
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_input_usage(parser, parser->local.usage[i]);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_feature_usage(parser, parser->local.usage[i]);
		break;
	}

	/* Reset the local parser environment */
	memset(&parser->local, 0, sizeof(parser->local));

	return 0;
}
922
923 /*
924 * Scan a report descriptor before the device is added to the bus.
925 * Sets device groups and other properties that determine what driver
926 * to load.
927 */
/*
 * Scan a report descriptor before the device is added to the bus.
 * Sets device groups and other properties that determine what driver
 * to load.  Returns 0 on success or -ENOMEM; parse errors are tolerated
 * here and reported later by hid_open_report().
 */
static int hid_scan_report(struct hid_device *hid)
{
	struct hid_item item;
	const __u8 *start = hid->dev_rdesc;
	const __u8 *end = start + hid->dev_rsize;
	/* one handler per item type, indexed by item.type (0-3) */
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_scan_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	/* parser is released automatically via the __free(kvfree) cleanup
	 * attribute on every return path */
	struct hid_parser *parser __free(kvfree) = vzalloc(sizeof(*parser));
	if (!parser)
		return -ENOMEM;

	parser->device = hid;
	hid->group = HID_GROUP_GENERIC;

	/*
	 * In case we are re-scanning after a BPF has been loaded,
	 * we need to use the bpf report descriptor, not the original one.
	 */
	if (hid->bpf_rdesc && hid->bpf_rsize) {
		start = hid->bpf_rdesc;
		end = start + hid->bpf_rsize;
	}

	/*
	 * The parsing is simpler than the one in hid_open_report() as we should
	 * be robust against hid errors. Those errors will be raised by
	 * hid_open_report() anyway.
	 */
	while ((start = fetch_item(start, end, &item)) != NULL)
		dispatch_type[item.type](parser, &item);

	/*
	 * Handle special flags set during scanning.
	 */
	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
	    (hid->group == HID_GROUP_MULTITOUCH))
		hid->group = HID_GROUP_MULTITOUCH_WIN_8;

	/*
	 * Vendor specific handlings
	 */
	switch (hid->vendor) {
	case USB_VENDOR_ID_WACOM:
		hid->group = HID_GROUP_WACOM;
		break;
	case USB_VENDOR_ID_SYNAPTICS:
		if (hid->group == HID_GROUP_GENERIC)
			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
				/*
				 * hid-rmi should take care of them,
				 * not hid-generic
				 */
				hid->group = HID_GROUP_RMI;
		break;
	}

	/* the stack was krealloc'ed by open_collection(), not part of the
	 * parser allocation, so it needs an explicit free */
	kfree(parser->collection_stack);
	return 0;
}
994
995 /**
996 * hid_parse_report - parse device report
997 *
998 * @hid: hid device
999 * @start: report start
1000 * @size: report size
1001 *
1002 * Allocate the device report as read by the bus driver. This function should
1003 * only be called from parse() in ll drivers.
1004 */
hid_parse_report(struct hid_device * hid,const __u8 * start,unsigned size)1005 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
1006 {
1007 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
1008 if (!hid->dev_rdesc)
1009 return -ENOMEM;
1010 hid->dev_rsize = size;
1011 return 0;
1012 }
1013 EXPORT_SYMBOL_GPL(hid_parse_report);
1014
/* Human-readable report-type names, indexed by enum hid_report_type;
 * used in hid_validate_values() error messages. */
static const char * const hid_report_names[] = {
	"HID_INPUT_REPORT",
	"HID_OUTPUT_REPORT",
	"HID_FEATURE_REPORT",
};
1020 /**
1021 * hid_validate_values - validate existing device report's value indexes
1022 *
1023 * @hid: hid device
1024 * @type: which report type to examine
1025 * @id: which report ID to examine (0 for first)
1026 * @field_index: which report field to examine
1027 * @report_counts: expected number of values
1028 *
1029 * Validate the number of values in a given field of a given report, after
1030 * parsing.
1031 */
/**
 * hid_validate_values - validate existing device report's value indexes
 *
 * @hid: hid device
 * @type: which report type to examine
 * @id: which report ID to examine (0 for first)
 * @field_index: which report field to examine
 * @report_counts: expected number of values
 *
 * Validate the number of values in a given field of a given report, after
 * parsing.  Returns the report on success, NULL (with an error logged)
 * when the report, field, or value count does not exist.
 */
struct hid_report *hid_validate_values(struct hid_device *hid,
				       enum hid_report_type type, unsigned int id,
				       unsigned int field_index,
				       unsigned int report_counts)
{
	struct hid_report *report;

	if (type > HID_FEATURE_REPORT) {
		hid_err(hid, "invalid HID report type %u\n", type);
		return NULL;
	}

	if (id >= HID_MAX_IDS) {
		hid_err(hid, "invalid HID report id %u\n", id);
		return NULL;
	}

	/*
	 * Explicitly not using hid_get_report() here since it depends on
	 * ->numbered being checked, which may not always be the case when
	 * drivers go to access report values.
	 */
	if (id == 0) {
		/*
		 * Validating on id 0 means we should examine the first
		 * report in the list.
		 */
		report = list_first_entry_or_null(
				&hid->report_enum[type].report_list,
				struct hid_report, list);
	} else {
		report = hid->report_enum[type].report_id_hash[id];
	}
	if (!report) {
		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
		return NULL;
	}
	if (report->maxfield <= field_index) {
		hid_err(hid, "not enough fields in %s %u\n",
			hid_report_names[type], id);
		return NULL;
	}
	if (report->field[field_index]->report_count < report_counts) {
		hid_err(hid, "not enough values in %s %u field %u\n",
			hid_report_names[type], id, field_index);
		return NULL;
	}
	return report;
}
EXPORT_SYMBOL_GPL(hid_validate_values);
1082
/*
 * Compute the Effective Resolution Multiplier from the multiplier field's
 * current value by mapping it from the logical to the physical range.
 * Degenerate or out-of-range results fall back to 1.
 */
static int hid_calculate_multiplier(struct hid_device *hid,
				    struct hid_field *multiplier)
{
	int m;
	__s32 v = *multiplier->value;
	__s32 lmin = multiplier->logical_minimum;
	__s32 lmax = multiplier->logical_maximum;
	__s32 pmin = multiplier->physical_minimum;
	__s32 pmax = multiplier->physical_maximum;

	/*
	 * "Because OS implementations will generally divide the control's
	 * reported count by the Effective Resolution Multiplier, designers
	 * should take care not to establish a potential Effective
	 * Resolution Multiplier of zero."
	 * HID Usage Table, v1.12, Section 4.3.1, p31
	 */
	if (lmax - lmin == 0)
		return 1;
	/*
	 * Handling the unit exponent is left as an exercise to whoever
	 * finds a device where that exponent is not 0.
	 *
	 * NOTE(review): the integer division (v - lmin)/(lmax - lmin)
	 * happens before the multiply, truncating the intermediate ratio
	 * to 0 unless v == lmax -- verify this matches the intended
	 * mapping for multi-step logical ranges.
	 */
	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
	if (unlikely(multiplier->unit_exponent != 0)) {
		hid_warn(hid,
			 "unsupported Resolution Multiplier unit exponent %d\n",
			 multiplier->unit_exponent);
	}

	/* There are no devices with an effective multiplier > 255 */
	if (unlikely(m == 0 || m > 255 || m < -255)) {
		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
		m = 1;
	}

	return m;
}
1121
/*
 * Stamp @effective_multiplier onto every usage of @field that belongs to
 * @multiplier_collection (or onto all usages when the multiplier is
 * report-global, i.e. @multiplier_collection is NULL).
 */
static void hid_apply_multiplier_to_field(struct hid_device *hid,
					  struct hid_field *field,
					  struct hid_collection *multiplier_collection,
					  int effective_multiplier)
{
	struct hid_collection *collection;
	struct hid_usage *usage;
	int i;

	/*
	 * If multiplier_collection is NULL, the multiplier applies
	 * to all fields in the report.
	 * Otherwise, it is the Logical Collection the multiplier applies to
	 * but our field may be in a subcollection of that collection.
	 */
	for (i = 0; i < field->maxusage; i++) {
		usage = &field->usage[i];

		/* Walk up the collection tree until we either reach the
		 * multiplier collection or run out of parents. */
		collection = &hid->collection[usage->collection_index];
		while (collection->parent_idx != -1 &&
		       collection != multiplier_collection)
			collection = &hid->collection[collection->parent_idx];

		/*
		 * parent_idx != -1 here means the walk stopped on
		 * multiplier_collection itself, i.e. this usage is inside it.
		 * NOTE(review): a multiplier collection that is itself a root
		 * (parent_idx == -1) would not match this test — presumably
		 * impossible since it must be a Logical Collection; confirm.
		 */
		if (collection->parent_idx != -1 ||
		    multiplier_collection == NULL)
			usage->resolution_multiplier = effective_multiplier;

	}
}
1151
/*
 * Given one Resolution Multiplier feature field, find the Logical
 * Collection it governs and apply its effective value to every input
 * field of the device that falls under that collection.
 */
static void hid_apply_multiplier(struct hid_device *hid,
				 struct hid_field *multiplier)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_field *field;
	struct hid_collection *multiplier_collection;
	int effective_multiplier;
	int i;

	/*
	 * "The Resolution Multiplier control must be contained in the same
	 * Logical Collection as the control(s) to which it is to be applied.
	 * If no Resolution Multiplier is defined, then the Resolution
	 * Multiplier defaults to 1. If more than one control exists in a
	 * Logical Collection, the Resolution Multiplier is associated with
	 * all controls in the collection. If no Logical Collection is
	 * defined, the Resolution Multiplier is associated with all
	 * controls in the report."
	 * HID Usage Table, v1.12, Section 4.3.1, p30
	 *
	 * Thus, search from the current collection upwards until we find a
	 * logical collection. Then search all fields for that same parent
	 * collection. Those are the fields the multiplier applies to.
	 *
	 * If we have more than one multiplier, it will overwrite the
	 * applicable fields later.
	 */
	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
	while (multiplier_collection->parent_idx != -1 &&
	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
	/* No Logical Collection found: the multiplier is report-global. */
	if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
		multiplier_collection = NULL;

	effective_multiplier = hid_calculate_multiplier(hid, multiplier);

	/* Only input reports carry multiplied controls. */
	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			field = rep->field[i];
			hid_apply_multiplier_to_field(hid, field,
						      multiplier_collection,
						      effective_multiplier);
		}
	}
}
1199
1200 /*
1201 * hid_setup_resolution_multiplier - set up all resolution multipliers
1202 *
1203 * @device: hid device
1204 *
1205 * Search for all Resolution Multiplier Feature Reports and apply their
1206 * value to all matching Input items. This only updates the internal struct
1207 * fields.
1208 *
1209 * The Resolution Multiplier is applied by the hardware. If the multiplier
1210 * is anything other than 1, the hardware will send pre-multiplied events
1211 * so that the same physical interaction generates an accumulated
 *	accumulated_value = value * multiplier
1213 * This may be achieved by sending
1214 * - "value * multiplier" for each event, or
1215 * - "value" but "multiplier" times as frequently, or
1216 * - a combination of the above
1217 * The only guarantee is that the same physical interaction always generates
1218 * an accumulated 'value * multiplier'.
1219 *
1220 * This function must be called before any event processing and after
1221 * any SetRequest to the Resolution Multiplier.
1222 */
hid_setup_resolution_multiplier(struct hid_device * hid)1223 void hid_setup_resolution_multiplier(struct hid_device *hid)
1224 {
1225 struct hid_report_enum *rep_enum;
1226 struct hid_report *rep;
1227 struct hid_usage *usage;
1228 int i, j;
1229
1230 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1231 list_for_each_entry(rep, &rep_enum->report_list, list) {
1232 for (i = 0; i < rep->maxfield; i++) {
1233 /* Ignore if report count is out of bounds. */
1234 if (rep->field[i]->report_count < 1)
1235 continue;
1236
1237 for (j = 0; j < rep->field[i]->maxusage; j++) {
1238 usage = &rep->field[i]->usage[j];
1239 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1240 hid_apply_multiplier(hid,
1241 rep->field[i]);
1242 }
1243 }
1244 }
1245 }
1246 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1247
/*
 * Walk the device's report descriptor item by item, dispatching each short
 * item to the main/global/local parser, and build the collection and report
 * structures. Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
static int hid_parse_collections(struct hid_device *device)
{
	struct hid_item item;
	const u8 *start = device->rdesc;
	const u8 *end = start + device->rsize;
	const u8 *next;
	int ret;
	/* Indexed by item.type: 0=main, 1=global, 2=local, 3=reserved. */
	static typeof(hid_parser_main) (* const dispatch_type[]) = {
		hid_parser_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	/* Freed automatically (kvfree) when this function returns. */
	struct hid_parser *parser __free(kvfree) = vzalloc(sizeof(*parser));
	if (!parser)
		return -ENOMEM;

	parser->device = device;

	device->collection = kzalloc_objs(*device->collection,
					  HID_DEFAULT_NUM_COLLECTIONS);
	if (!device->collection)
		return -ENOMEM;

	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
	/* parent_idx == -1 marks a root collection */
	for (unsigned int i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
		device->collection[i].parent_idx = -1;

	ret = -EINVAL;
	if (start == end) {
		hid_err(device, "rejecting 0-sized report descriptor\n");
		goto out;
	}

	while ((next = fetch_item(start, end, &item)) != NULL) {
		start = next;

		/* Long items are legal HID but not supported here. */
		if (item.format != HID_ITEM_FORMAT_SHORT) {
			hid_err(device, "unexpected long global item\n");
			goto out;
		}

		if (dispatch_type[item.type](parser, &item)) {
			hid_err(device, "item %u %u %u %u parsing failed\n",
				item.format,
				(unsigned int)item.size,
				(unsigned int)item.type,
				(unsigned int)item.tag);
			goto out;
		}
	}

	/* fetch_item() returning NULL before consuming everything means a
	 * truncated/garbled item at the reported offset. */
	if (start != end) {
		hid_err(device, "item fetching failed at offset %u/%u\n",
			device->rsize - (unsigned int)(end - start),
			device->rsize);
		goto out;
	}

	if (parser->collection_stack_ptr) {
		hid_err(device, "unbalanced collection at end of report description\n");
		goto out;
	}

	if (parser->local.delimiter_depth) {
		hid_err(device, "unbalanced delimiter at end of report description\n");
		goto out;
	}

	/*
	 * fetch initial values in case the device's
	 * default multiplier isn't the recommended 1
	 */
	hid_setup_resolution_multiplier(device);

	device->status |= HID_STAT_PARSED;
	ret = 0;

out:
	/* Runs on both success and failure; the stack is only needed
	 * during parsing. The parser itself is freed by __free(kvfree). */
	kfree(parser->collection_stack);
	return ret;
}
1331
1332 /**
1333 * hid_open_report - open a driver-specific device report
1334 *
1335 * @device: hid device
1336 *
1337 * Parse a report description into a hid_device structure. Reports are
1338 * enumerated, fields are attached to these reports.
1339 * 0 returned on success, otherwise nonzero error value.
1340 *
1341 * This function (or the equivalent hid_parse() macro) should only be
1342 * called from probe() in drivers, before starting the device.
1343 */
int hid_open_report(struct hid_device *device)
{
	unsigned int size;
	const u8 *start;
	int error;

	/* Parsing twice would leak/corrupt the report structures. */
	if (WARN_ON(device->status & HID_STAT_PARSED))
		return -EBUSY;

	/* Parse the (possibly eBPF-modified) descriptor, not the raw one. */
	start = device->bpf_rdesc;
	if (WARN_ON(!start))
		return -ENODEV;
	size = device->bpf_rsize;

	if (device->driver->report_fixup) {
		/*
		 * device->driver->report_fixup() needs to work
		 * on a copy of our report descriptor so it can
		 * change it.
		 */
		u8 *buf __free(kfree) = kmemdup(start, size, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		start = device->driver->report_fixup(device, buf, &size);

		/*
		 * The second kmemdup is required in case report_fixup() returns
		 * a static read-only memory, but we have no idea if that memory
		 * needs to be cleaned up or not at the end.
		 */
		start = kmemdup(start, size, GFP_KERNEL);
		if (!start)
			return -ENOMEM;
	}

	/* The descriptor now belongs to the device; the error path below
	 * hands cleanup over to hid_close_report(). */
	device->rdesc = start;
	device->rsize = size;

	error = hid_parse_collections(device);
	if (error) {
		hid_close_report(device);
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hid_open_report);
1393
1394 /*
1395 * Extract/implement a data field from/to a little endian report (bit array).
1396 *
1397 * Code sort-of follows HID spec:
1398 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1399 *
1400 * While the USB HID spec allows unlimited length bit fields in "report
1401 * descriptors", most devices never use more than 16 bits.
1402 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1403 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1404 */
1405
/*
 * Extract an n-bit little endian value starting at bit @offset of @report,
 * one source byte per iteration. The caller (hid_field_extract) guarantees
 * n <= 32.
 */
static u32 __extract(u8 *report, unsigned offset, int n)
{
	unsigned int idx = offset / 8;		/* first byte to read */
	unsigned int bit_nr = 0;		/* destination bit position */
	unsigned int bit_shift = offset % 8;	/* bit offset within first byte */
	int bits_to_copy = 8 - bit_shift;
	u32 value = 0;
	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;

	while (n > 0) {
		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
		n -= bits_to_copy;
		bit_nr += bits_to_copy;
		/* after the first byte we always consume whole bytes */
		bits_to_copy = 8;
		bit_shift = 0;
		idx++;
	}

	/* drop the extra high bits picked up from the last byte */
	return value & mask;
}
1426
hid_field_extract(const struct hid_device * hid,u8 * report,unsigned offset,unsigned n)1427 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1428 unsigned offset, unsigned n)
1429 {
1430 if (n > 32) {
1431 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1432 __func__, n, current->comm);
1433 n = 32;
1434 }
1435
1436 return __extract(report, offset, n);
1437 }
1438 EXPORT_SYMBOL_GPL(hid_field_extract);
1439
1440 /*
1441 * "implement" : set bits in a little endian bit stream.
1442 * Same concepts as "extract" (see comments above).
1443 * The data mangled in the bit stream remains in little endian
 * order the whole time. It makes more sense to talk about
1445 * endianness of register values by considering a register
1446 * a "cached" copy of the little endian bit stream.
1447 */
1448
/*
 * Write the low n bits of @value into @report starting at bit @offset,
 * little endian, leaving all surrounding bits untouched. The caller
 * (implement) guarantees n <= 32 and value masked to n bits.
 */
static void __implement(u8 *report, unsigned offset, int n, u32 value)
{
	unsigned int idx = offset / 8;		/* first byte to write */
	unsigned int bit_shift = offset % 8;	/* bit offset within that byte */
	int bits_to_set = 8 - bit_shift;

	/* full-byte spans: clear then OR in the next chunk of value */
	while (n - bits_to_set >= 0) {
		report[idx] &= ~(0xff << bit_shift);
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_shift = 0;
		idx++;
	}

	/* last nibble */
	if (n) {
		u8 bit_mask = ((1U << n) - 1);
		report[idx] &= ~(bit_mask << bit_shift);
		report[idx] |= value << bit_shift;
	}
}
1472
/*
 * Sanitize n and value, then delegate the bit surgery to __implement().
 * Oversized widths are clamped to 32 bits and oversized values are masked,
 * each with a warning.
 */
static void implement(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n, u32 value)
{
	if (unlikely(n > 32)) {
		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
			 __func__, n, current->comm);
		n = 32;
	} else if (n < 32) {
		u32 limit = (1U << n) - 1;

		if (unlikely(value > limit)) {
			hid_warn(hid,
				 "%s() called with too large value %d (n: %d)! (%s)\n",
				 __func__, value, n, current->comm);
			value &= limit;
		}
	}

	__implement(report, offset, n, value);
}
1493
1494 /*
1495 * Search an array for a value.
1496 */
1497
/* Return 0 if @value occurs in the first @n elements of @array, else -1. */
static int search(__s32 *array, __s32 value, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++)
		if (array[i] == value)
			return 0;

	return -1;
}
1506
1507 /**
1508 * hid_match_report - check if driver's raw_event should be called
1509 *
1510 * @hid: hid device
1511 * @report: hid report to match against
1512 *
1513 * compare hid->driver->report_table->report_type to report->type
1514 */
hid_match_report(struct hid_device * hid,struct hid_report * report)1515 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1516 {
1517 const struct hid_report_id *id = hid->driver->report_table;
1518
1519 if (!id) /* NULL means all */
1520 return 1;
1521
1522 for (; id->report_type != HID_TERMINATOR; id++)
1523 if (id->report_type == HID_ANY_ID ||
1524 id->report_type == report->type)
1525 return 1;
1526 return 0;
1527 }
1528
1529 /**
1530 * hid_match_usage - check if driver's event should be called
1531 *
1532 * @hid: hid device
1533 * @usage: usage to match against
1534 *
1535 * compare hid->driver->usage_table->usage_{type,code} to
1536 * usage->usage_{type,code}
1537 */
hid_match_usage(struct hid_device * hid,struct hid_usage * usage)1538 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1539 {
1540 const struct hid_usage_id *id = hid->driver->usage_table;
1541
1542 if (!id) /* NULL means all */
1543 return 1;
1544
1545 for (; id->usage_type != HID_ANY_ID - 1; id++)
1546 if ((id->usage_hid == HID_ANY_ID ||
1547 id->usage_hid == usage->hid) &&
1548 (id->usage_type == HID_ANY_ID ||
1549 id->usage_type == usage->type) &&
1550 (id->usage_code == HID_ANY_ID ||
1551 id->usage_code == usage->code))
1552 return 1;
1553 return 0;
1554 }
1555
/*
 * Deliver one usage value to all interested parties, in order: the debugfs
 * event log, the driver's ->event() callback, hidinput, and hiddev.
 */
static void hid_process_event(struct hid_device *hid, struct hid_field *field,
			      struct hid_usage *usage, __s32 value, int interrupt)
{
	struct hid_driver *hdrv = hid->driver;
	int ret;

	/* only dump when someone is watching via debugfs */
	if (!list_empty(&hid->debug_list))
		hid_dump_input(hid, usage, value);

	/* A non-zero return from ->event() consumes the event; negative
	 * values are logged as errors. Either way delivery stops here. */
	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
		ret = hdrv->event(hid, field, usage, value);
		if (ret != 0) {
			if (ret < 0)
				hid_err(hid, "%s's event failed with %d\n",
					hdrv->name, ret);
			return;
		}
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_hid_event(hid, field, usage, value);
	/* hiddev only receives events that arrived via the interrupt path */
	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
		hid->hiddev_hid_event(hid, field, usage, value);
}
1580
1581 /*
1582 * Checks if the given value is valid within this field
1583 */
/*
 * Checks if the given value is valid within this field: it must lie in
 * [logical_minimum, logical_maximum] and (value - min) must be a valid
 * index into the usage array of size field->maxusage.
 */
static inline int hid_array_value_is_valid(struct hid_field *field,
					   __s32 value)
{
	if (value < field->logical_minimum ||
	    value > field->logical_maximum)
		return 0;

	return value - field->logical_minimum < field->maxusage;
}
1598
1599 /*
1600 * Fetch the field from the data. The field content is stored for next
1601 * report processing (we do differential reporting to the layer).
1602 */
/*
 * Fetch the field from the data. The field content is stored for next
 * report processing (we do differential reporting to the layer).
 * Values are decoded into field->new_value; field->ignored is set when a
 * keyboard ErrorRollOver code is seen so the whole field can be skipped.
 */
static void hid_input_fetch_field(struct hid_device *hid,
				  struct hid_field *field,
				  __u8 *data)
{
	unsigned n;
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	__s32 min = field->logical_minimum;
	__s32 *value;

	value = field->new_value;
	memset(value, 0, count * sizeof(__s32));
	field->ignored = false;

	for (n = 0; n < count; n++) {

		/* sign-extend only when the logical range is signed */
		value[n] = min < 0 ?
			snto32(hid_field_extract(hid, data, offset + n * size,
			       size), size) :
			hid_field_extract(hid, data, offset + n * size, size);

		/* Ignore report if ErrorRollOver */
		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
		    hid_array_value_is_valid(field, value[n]) &&
		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
			field->ignored = true;
			return;
		}
	}
}
1634
1635 /*
1636 * Process a received variable field.
1637 */
1638
hid_input_var_field(struct hid_device * hid,struct hid_field * field,int interrupt)1639 static void hid_input_var_field(struct hid_device *hid,
1640 struct hid_field *field,
1641 int interrupt)
1642 {
1643 unsigned int count = field->report_count;
1644 __s32 *value = field->new_value;
1645 unsigned int n;
1646
1647 for (n = 0; n < count; n++)
1648 hid_process_event(hid,
1649 field,
1650 &field->usage[n],
1651 value[n],
1652 interrupt);
1653
1654 memcpy(field->value, value, count * sizeof(__s32));
1655 }
1656
1657 /*
1658 * Process a received array field. The field content is stored for
1659 * next report processing (we do differential reporting to the layer).
1660 */
1661
/*
 * Process a received array field. The field content is stored for
 * next report processing (we do differential reporting to the layer):
 * usages present in the old values but absent from the new ones are
 * released (0), usages newly present are pressed (1).
 */
static void hid_input_array_field(struct hid_device *hid,
				  struct hid_field *field,
				  int interrupt)
{
	unsigned int n;
	unsigned int count = field->report_count;
	__s32 min = field->logical_minimum;
	__s32 *value;

	value = field->new_value;

	/* ErrorRollOver */
	if (field->ignored)
		return;

	for (n = 0; n < count; n++) {
		/* old value no longer present in the new report: release */
		if (hid_array_value_is_valid(field, field->value[n]) &&
		    search(value, field->value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[field->value[n] - min],
					  0,
					  interrupt);

		/* new value not present in the old report: press */
		if (hid_array_value_is_valid(field, value[n]) &&
		    search(field->value, value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[value[n] - min],
					  1,
					  interrupt);
	}

	memcpy(field->value, value, count * sizeof(__s32));
}
1697
1698 /*
1699 * Analyse a received report, and fetch the data from it. The field
1700 * content is stored for next report processing (we do differential
1701 * reporting to the layer).
1702 */
/*
 * Analyse a received report, and fetch the data from it. The field
 * content is stored for next report processing (we do differential
 * reporting to the layer). Three phases: decode all fields, dispatch
 * events (priority-ordered when a field_entry_list exists), then commit
 * the new values of variable fields.
 */
static void hid_process_report(struct hid_device *hid,
			       struct hid_report *report,
			       __u8 *data,
			       int interrupt)
{
	unsigned int a;
	struct hid_field_entry *entry;
	struct hid_field *field;

	/* first retrieve all incoming values in data */
	for (a = 0; a < report->maxfield; a++)
		hid_input_fetch_field(hid, report->field[a], data);

	if (!list_empty(&report->field_entry_list)) {
		/* INPUT_REPORT, we have a priority list of fields */
		list_for_each_entry(entry,
				    &report->field_entry_list,
				    list) {
			field = entry->field;

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_process_event(hid,
						  field,
						  &field->usage[entry->index],
						  field->new_value[entry->index],
						  interrupt);
			else
				/* arrays commit their own values internally */
				hid_input_array_field(hid, field, interrupt);
		}

		/* we need to do the memcpy at the end for var items */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				memcpy(field->value, field->new_value,
				       field->report_count * sizeof(__s32));
		}
	} else {
		/* FEATURE_REPORT, regular processing */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_input_var_field(hid, field, interrupt);
			else
				hid_input_array_field(hid, field, interrupt);
		}
	}
}
1753
1754 /*
1755 * Insert a given usage_index in a field in the list
1756 * of processed usages in the report.
1757 *
1758 * The elements of lower priority score are processed
1759 * first.
1760 */
__hid_insert_field_entry(struct hid_device * hid,struct hid_report * report,struct hid_field_entry * entry,struct hid_field * field,unsigned int usage_index)1761 static void __hid_insert_field_entry(struct hid_device *hid,
1762 struct hid_report *report,
1763 struct hid_field_entry *entry,
1764 struct hid_field *field,
1765 unsigned int usage_index)
1766 {
1767 struct hid_field_entry *next;
1768
1769 entry->field = field;
1770 entry->index = usage_index;
1771 entry->priority = field->usages_priorities[usage_index];
1772
1773 /* insert the element at the correct position */
1774 list_for_each_entry(next,
1775 &report->field_entry_list,
1776 list) {
1777 /*
1778 * the priority of our element is strictly higher
1779 * than the next one, insert it before
1780 */
1781 if (entry->priority > next->priority) {
1782 list_add_tail(&entry->list, &next->list);
1783 return;
1784 }
1785 }
1786
1787 /* lowest priority score: insert at the end */
1788 list_add_tail(&entry->list, &report->field_entry_list);
1789 }
1790
/*
 * Build the priority-ordered field_entry_list for one input report.
 * On allocation failure the list stays empty and hid_process_report()
 * silently falls back to unordered processing.
 */
static void hid_report_process_ordering(struct hid_device *hid,
					struct hid_report *report)
{
	struct hid_field *field;
	struct hid_field_entry *entries;
	unsigned int a, u, usages;
	unsigned int count = 0;

	/* count the number of individual fields in the report */
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE)
			count += field->report_count;
		else
			count++;
	}

	/* allocate the memory to process the fields */
	entries = kzalloc_objs(*entries, count);
	if (!entries)
		return;

	/* kept so the array can be freed when the report goes away */
	report->field_entries = entries;

	/*
	 * walk through all fields in the report and
	 * store them by priority order in report->field_entry_list
	 *
	 * - Var elements are individualized (field + usage_index)
	 * - Arrays are taken as one, we can not chose an order for them
	 */
	usages = 0;
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
			for (u = 0; u < field->report_count; u++) {
				__hid_insert_field_entry(hid, report,
							 &entries[usages],
							 field, u);
				usages++;
			}
		} else {
			__hid_insert_field_entry(hid, report, &entries[usages],
						 field, 0);
			usages++;
		}
	}
}
1841
hid_process_ordering(struct hid_device * hid)1842 static void hid_process_ordering(struct hid_device *hid)
1843 {
1844 struct hid_report *report;
1845 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1846
1847 list_for_each_entry(report, &report_enum->report_list, list)
1848 hid_report_process_ordering(hid, report);
1849 }
1850
1851 /*
1852 * Output the field into the report.
1853 */
1854
/*
 * Output the field into the report buffer: every value is packed at its
 * bit offset, converted back to n-bit form when the logical range is
 * signed.
 */
static void hid_output_field(const struct hid_device *hid,
			     struct hid_field *field, __u8 *data)
{
	unsigned size = field->report_size;
	unsigned offset = field->report_offset;
	unsigned n;

	for (n = 0; n < field->report_count; n++) {
		u32 raw = field->logical_minimum < 0 ?
			  s32ton(field->value[n], size) :	/* signed */
			  field->value[n];			/* unsigned */

		implement(hid, data, offset + n * size, size, raw);
	}
}
1872
1873 /*
1874 * Compute the size of a report.
1875 */
hid_compute_report_size(struct hid_report * report)1876 static size_t hid_compute_report_size(struct hid_report *report)
1877 {
1878 if (report->size)
1879 return ((report->size - 1) >> 3) + 1;
1880
1881 return 0;
1882 }
1883
1884 /*
1885 * Create a report. 'data' has to be allocated using
1886 * hid_alloc_report_buf() so that it has proper size.
1887 */
1888
/*
 * Create a report in 'data', which must have been allocated with
 * hid_alloc_report_buf() so it is large enough. A non-zero report ID is
 * emitted as the first byte, then every field is packed in.
 */
void hid_output_report(struct hid_report *report, __u8 *data)
{
	unsigned i;

	if (report->id > 0)
		*data++ = report->id;

	/* start from a clean payload so unset bits are zero */
	memset(data, 0, hid_compute_report_size(report));

	for (i = 0; i < report->maxfield; i++)
		hid_output_field(report->device, report->field[i], data);
}
EXPORT_SYMBOL_GPL(hid_output_report);
1901
1902 /*
1903 * Allocator for buffer that is going to be passed to hid_output_report()
1904 */
hid_alloc_report_buf(struct hid_report * report,gfp_t flags)1905 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1906 {
1907 /*
1908 * 7 extra bytes are necessary to achieve proper functionality
1909 * of implement() working on 8 byte chunks
1910 * 1 extra byte for the report ID if it is null (not used) so
1911 * we can reserve that extra byte in the first position of the buffer
1912 * when sending it to .raw_request()
1913 */
1914
1915 u32 len = hid_report_len(report) + 7 + (report->id == 0);
1916
1917 return kzalloc(len, flags);
1918 }
1919 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
1920
1921 /*
1922 * Set a field value. The report this field belongs to has to be
1923 * created and transferred to the device, to set this value in the
1924 * device.
1925 */
1926
/*
 * Set a field value. The report this field belongs to has to be created
 * and transferred to the device, to set this value in the device.
 *
 * Returns 0 on success, -1 on a NULL field, out-of-range offset, or a
 * value that does not survive the signed n-bit round-trip.
 *
 * Fix: validate @offset against report_count *before* handing
 * field->usage + offset to hid_dump_input(); previously an out-of-range
 * offset let the debug dump read past the end of the usage array.
 */
int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
	unsigned size;

	if (!field)
		return -1;

	size = field->report_size;

	/* bounds check must precede any use of field->usage + offset */
	if (offset >= field->report_count) {
		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
			offset, field->report_count);
		return -1;
	}

	hid_dump_input(field->report->device, field->usage + offset, value);

	if (field->logical_minimum < 0) {
		/* reject values that cannot be represented in 'size' bits */
		if (value != snto32(s32ton(value, size), size)) {
			hid_err(field->report->device, "value %d is out of range\n", value);
			return -1;
		}
	}
	field->value[offset] = value;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_set_field);
1953
hid_find_field(struct hid_device * hdev,unsigned int report_type,unsigned int application,unsigned int usage)1954 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1955 unsigned int application, unsigned int usage)
1956 {
1957 struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1958 struct hid_report *report;
1959 int i, j;
1960
1961 list_for_each_entry(report, report_list, list) {
1962 if (report->application != application)
1963 continue;
1964
1965 for (i = 0; i < report->maxfield; i++) {
1966 struct hid_field *field = report->field[i];
1967
1968 for (j = 0; j < field->maxusage; j++) {
1969 if (field->usage[j].hid == usage)
1970 return field;
1971 }
1972 }
1973 }
1974
1975 return NULL;
1976 }
1977 EXPORT_SYMBOL_GPL(hid_find_field);
1978
hid_get_report(struct hid_report_enum * report_enum,const u8 * data)1979 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1980 const u8 *data)
1981 {
1982 struct hid_report *report;
1983 unsigned int n = 0; /* Normally report number is 0 */
1984
1985 /* Device uses numbered reports, data[0] is report number */
1986 if (report_enum->numbered)
1987 n = *data;
1988
1989 report = report_enum->report_id_hash[n];
1990 if (report == NULL)
1991 dbg_hid("undefined report_id %u received\n", n);
1992
1993 return report;
1994 }
1995
1996 /*
1997 * Implement a generic .request() callback, using .raw_request()
1998 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1999 */
/*
 * Implement a generic .request() callback, using .raw_request()
 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
 */
int __hid_request(struct hid_device *hid, struct hid_report *report,
		  enum hid_class_request reqtype)
{
	u8 *data_buf;
	int ret;
	u32 len;

	/* freed automatically when this function returns */
	u8 *buf __free(kfree) = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	data_buf = buf;
	len = hid_report_len(report);

	if (report->id == 0) {
		/* reserve the first byte for the report ID */
		data_buf++;
		len++;
	}

	/* serialize the cached field values into the buffer first */
	if (reqtype == HID_REQ_SET_REPORT)
		hid_output_report(report, data_buf);

	ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
	if (ret < 0) {
		dbg_hid("unable to complete request: %d\n", ret);
		return ret;
	}

	/* feed the device's answer (ret = bytes received) back into parsing */
	if (reqtype == HID_REQ_GET_REPORT)
		hid_input_report(hid, report->type, buf, ret, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(__hid_request);
2035
/*
 * Dispatch one raw report buffer: identify the report, clamp its expected
 * size to the transport's limits, then hand the data to hiddev, hidraw,
 * the field parser / driver ->report() callback, and hidinput in turn.
 */
int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
			 int interrupt)
{
	struct hid_report_enum *report_enum = hid->report_enum + type;
	struct hid_report *report;
	struct hid_driver *hdrv;
	int max_buffer_size = HID_MAX_BUFFER_SIZE;
	u32 rsize, csize = size;
	u8 *cdata = data;
	int ret = 0;

	report = hid_get_report(report_enum, data);
	if (!report)
		goto out;

	/* skip the leading report-id byte for field parsing */
	if (report_enum->numbered) {
		cdata++;
		csize--;
	}

	rsize = hid_compute_report_size(report);

	/* transports may impose a smaller limit than HID_MAX_BUFFER_SIZE */
	if (hid->ll_driver->max_buffer_size)
		max_buffer_size = hid->ll_driver->max_buffer_size;

	/* numbered reports lose one byte of payload to the id */
	if (report_enum->numbered && rsize >= max_buffer_size)
		rsize = max_buffer_size - 1;
	else if (rsize > max_buffer_size)
		rsize = max_buffer_size;

	if (csize < rsize) {
		hid_warn_ratelimited(hid, "Event data for report %d was too short (%d vs %d)\n",
				     report->id, rsize, csize);
		ret = -EINVAL;
		goto out;
	}

	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
		hid->hiddev_report_event(hid, report);
	if (hid->claimed & HID_CLAIMED_HIDRAW) {
		/* hidraw gets the untrimmed buffer, id byte included */
		ret = hidraw_report_event(hid, data, size);
		if (ret)
			goto out;
	}

	/* skip field parsing when hidraw is the only consumer */
	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
		hid_process_report(hid, report, cdata, interrupt);
		hdrv = hid->driver;
		if (hdrv && hdrv->report)
			hdrv->report(hid, report);
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_report_event(hid, report);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(hid_report_raw_event);
2094
2095
/*
 * __hid_input_report - common entry point for incoming raw reports
 *
 * @source and @from_bpf tell the HID-BPF dispatcher where the report
 * originated; @lock_already_taken indicates whether the caller already
 * holds hid->driver_input_lock.
 */
static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
			      u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
			      bool lock_already_taken)
{
	struct hid_report_enum *report_enum;
	struct hid_driver *hdrv;
	struct hid_report *report;
	int ret = 0;

	if (!hid)
		return -ENODEV;

	/*
	 * down_trylock() returns 0 on success. If the caller claims to hold
	 * the lock but we could still acquire it, the claim was wrong: drop
	 * the lock we just took and fail. If the caller does not hold it and
	 * it is contended, report -EBUSY rather than sleeping here.
	 */
	ret = down_trylock(&hid->driver_input_lock);
	if (lock_already_taken && !ret) {
		up(&hid->driver_input_lock);
		return -EINVAL;
	} else if (!lock_already_taken && ret) {
		return -EBUSY;
	}

	if (!hid->driver) {
		ret = -ENODEV;
		goto unlock;
	}
	report_enum = hid->report_enum + type;
	hdrv = hid->driver;

	/* HID-BPF programs may rewrite the data and/or change its size */
	data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto unlock;
	}

	if (!size) {
		dbg_hid("empty report\n");
		ret = -1;
		goto unlock;
	}

	/* Avoid unnecessary overhead if debugfs is disabled */
	if (!list_empty(&hid->debug_list))
		hid_dump_report(hid, type, data, size);

	report = hid_get_report(report_enum, data);

	if (!report) {
		ret = -1;
		goto unlock;
	}

	/* give the bound driver first crack at the raw bytes */
	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
		ret = hdrv->raw_event(hid, report, data, size);
		if (ret < 0)
			goto unlock;
	}

	ret = hid_report_raw_event(hid, type, data, size, interrupt);

unlock:
	if (!lock_already_taken)
		up(&hid->driver_input_lock);
	return ret;
}
2159
2160 /**
2161 * hid_input_report - report data from lower layer (usb, bt...)
2162 *
2163 * @hid: hid device
2164 * @type: HID report type (HID_*_REPORT)
2165 * @data: report contents
2166 * @size: size of data parameter
2167 * @interrupt: distinguish between interrupt and control transfers
2168 *
2169 * This is data entry for lower layers.
2170 */
hid_input_report(struct hid_device * hid,enum hid_report_type type,u8 * data,u32 size,int interrupt)2171 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2172 int interrupt)
2173 {
2174 return __hid_input_report(hid, type, data, size, interrupt, 0,
2175 false, /* from_bpf */
2176 false /* lock_already_taken */);
2177 }
2178 EXPORT_SYMBOL_GPL(hid_input_report);
2179
hid_match_one_id(const struct hid_device * hdev,const struct hid_device_id * id)2180 bool hid_match_one_id(const struct hid_device *hdev,
2181 const struct hid_device_id *id)
2182 {
2183 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2184 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2185 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2186 (id->product == HID_ANY_ID || id->product == hdev->product);
2187 }
2188
hid_match_id(const struct hid_device * hdev,const struct hid_device_id * id)2189 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2190 const struct hid_device_id *id)
2191 {
2192 for (; id->bus; id++)
2193 if (hid_match_one_id(hdev, id))
2194 return id;
2195
2196 return NULL;
2197 }
2198 EXPORT_SYMBOL_GPL(hid_match_id);
2199
/* Devices for which hid_connect() forces the hiddev interface. */
static const struct hid_device_id hid_hiddev_list[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
	{ }
};

/* True when @hdev is on the forced-hiddev list above. */
static bool hid_hiddev(struct hid_device *hdev)
{
	return !!hid_match_id(hdev, hid_hiddev_list);
}
2210
2211
2212 static ssize_t
report_descriptor_read(struct file * filp,struct kobject * kobj,const struct bin_attribute * attr,char * buf,loff_t off,size_t count)2213 report_descriptor_read(struct file *filp, struct kobject *kobj,
2214 const struct bin_attribute *attr,
2215 char *buf, loff_t off, size_t count)
2216 {
2217 struct device *dev = kobj_to_dev(kobj);
2218 struct hid_device *hdev = to_hid_device(dev);
2219
2220 if (off >= hdev->rsize)
2221 return 0;
2222
2223 if (off + count > hdev->rsize)
2224 count = hdev->rsize - off;
2225
2226 memcpy(buf, hdev->rdesc + off, count);
2227
2228 return count;
2229 }
2230
2231 static ssize_t
country_show(struct device * dev,struct device_attribute * attr,char * buf)2232 country_show(struct device *dev, struct device_attribute *attr,
2233 char *buf)
2234 {
2235 struct hid_device *hdev = to_hid_device(dev);
2236
2237 return sprintf(buf, "%02x\n", hdev->country & 0xff);
2238 }
2239
/* Read-only binary sysfs attribute exposing the raw report descriptor. */
static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);

/* Read-only sysfs attribute exposing the HID country code. */
static const DEVICE_ATTR_RO(country);
2243
/*
 * hid_connect - attach the requested listeners (input, hiddev, hidraw, FF)
 * @hdev: hid device
 * @connect_mask: HID_CONNECT_* bits selecting which interfaces to bind
 *
 * Applies quirk-driven adjustments to @connect_mask, binds each selected
 * listener (recording successes in hdev->claimed), optionally initializes
 * force feedback, and logs a one-line summary of the device.
 */
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
		"Joystick", "Gamepad", "Keyboard", "Keypad",
		"Multi-Axis Controller"
	};
	const char *type, *bus;
	char buf[64] = "";
	unsigned int i;
	int len;
	int ret;

	ret = hid_bpf_connect_device(hdev);
	if (ret)
		return ret;

	/* quirks can force interfaces on; hiddev is only available on USB */
	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
	if (hdev->bus != BUS_USB)
		connect_mask &= ~HID_CONNECT_HIDDEV;
	if (hid_hiddev(hdev))
		connect_mask |= HID_CONNECT_HIDDEV_FORCE;

	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
		hdev->claimed |= HID_CLAIMED_INPUT;

	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
			!hdev->hiddev_connect(hdev,
				connect_mask & HID_CONNECT_HIDDEV_FORCE))
		hdev->claimed |= HID_CLAIMED_HIDDEV;
	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
		hdev->claimed |= HID_CLAIMED_HIDRAW;

	if (connect_mask & HID_CONNECT_DRIVER)
		hdev->claimed |= HID_CLAIMED_DRIVER;

	/* Drivers with the ->raw_event callback set are not required to connect
	 * to any other listener. */
	if (!hdev->claimed && !hdev->driver->raw_event) {
		hid_err(hdev, "device has no listeners, quitting\n");
		return -ENODEV;
	}

	hid_process_ordering(hdev);

	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
		hdev->ff_init(hdev);

	/* build the "input,hiddevN,hidrawN" summary for the log line below */
	len = 0;
	if (hdev->claimed & HID_CLAIMED_INPUT)
		len += sprintf(buf + len, "input");
	if (hdev->claimed & HID_CLAIMED_HIDDEV)
		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
				((struct hiddev *)hdev->hiddev)->minor);
	if (hdev->claimed & HID_CLAIMED_HIDRAW)
		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
				((struct hidraw *)hdev->hidraw)->minor);

	/* derive a human-readable type from the first generic-desktop application collection */
	type = "Device";
	for (i = 0; i < hdev->maxcollection; i++) {
		struct hid_collection *col = &hdev->collection[i];
		if (col->type == HID_COLLECTION_APPLICATION &&
		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
			type = types[col->usage & 0xffff];
			break;
		}
	}

	switch (hdev->bus) {
	case BUS_USB:
		bus = "USB";
		break;
	case BUS_BLUETOOTH:
		bus = "BLUETOOTH";
		break;
	case BUS_I2C:
		bus = "I2C";
		break;
	case BUS_SDW:
		bus = "SOUNDWIRE";
		break;
	case BUS_VIRTUAL:
		bus = "VIRTUAL";
		break;
	case BUS_INTEL_ISHTP:
	case BUS_AMD_SFH:
		bus = "SENSOR HUB";
		break;
	default:
		bus = "<UNKNOWN>";
	}

	/* failing to create the sysfs attribute is not fatal, only warn */
	ret = device_create_file(&hdev->dev, &dev_attr_country);
	if (ret)
		hid_warn(hdev,
			 "can't create sysfs country code attribute err: %d\n", ret);

	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
		 type, hdev->name, hdev->phys);

	return 0;
}
EXPORT_SYMBOL_GPL(hid_connect);
2353
/*
 * hid_disconnect - tear down everything hid_connect() set up
 *
 * Removes the country sysfs attribute, unbinds each claimed listener,
 * clears the claim mask and finally detaches HID-BPF.
 */
void hid_disconnect(struct hid_device *hdev)
{
	device_remove_file(&hdev->dev, &dev_attr_country);
	if (hdev->claimed & HID_CLAIMED_INPUT)
		hidinput_disconnect(hdev);
	if (hdev->claimed & HID_CLAIMED_HIDDEV)
		hdev->hiddev_disconnect(hdev);
	if (hdev->claimed & HID_CLAIMED_HIDRAW)
		hidraw_disconnect(hdev);
	hdev->claimed = 0;

	hid_bpf_disconnect_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_disconnect);
2368
/**
 * hid_hw_start - start underlying HW
 * @hdev: hid device
 * @connect_mask: which outputs to connect, see HID_CONNECT_*
 *
 * Call this in probe function *after* hid_parse. This will setup HW
 * buffers and start the device (if not deferred to device open).
 * hid_hw_stop must be called if this was successful.
 */
int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
{
	int error;

	error = hdev->ll_driver->start(hdev);
	if (error)
		return error;

	if (connect_mask) {
		error = hid_connect(hdev, connect_mask);
		if (error) {
			/* roll back the transport start on connect failure */
			hdev->ll_driver->stop(hdev);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hid_hw_start);
2397
/**
 * hid_hw_stop - stop underlying HW
 * @hdev: hid device
 *
 * This is usually called from remove function or from probe when something
 * failed and hid_hw_start was called already.
 */
void hid_hw_stop(struct hid_device *hdev)
{
	/* disconnect listeners first, then stop the transport */
	hid_disconnect(hdev);
	hdev->ll_driver->stop(hdev);
}
EXPORT_SYMBOL_GPL(hid_hw_stop);
2411
/**
 * hid_hw_open - signal underlying HW to start delivering events
 * @hdev: hid device
 *
 * Tell underlying HW to start delivering events from the device.
 * This function should be called sometime after successful call
 * to hid_hw_start().
 */
int hid_hw_open(struct hid_device *hdev)
{
	int ret;

	ret = mutex_lock_killable(&hdev->ll_open_lock);
	if (ret)
		return ret;

	/* only the first opener actually opens the transport */
	if (!hdev->ll_open_count++) {
		ret = hdev->ll_driver->open(hdev);
		if (ret)
			hdev->ll_open_count--;

		/*
		 * NOTE(review): on_hid_hw_open is invoked even when
		 * ll_driver->open() failed above — confirm this is
		 * intentional.
		 */
		if (hdev->driver->on_hid_hw_open)
			hdev->driver->on_hid_hw_open(hdev);
	}

	mutex_unlock(&hdev->ll_open_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_hw_open);
2441
/**
 * hid_hw_close - signal underlying HW to stop delivering events
 *
 * @hdev: hid device
 *
 * This function indicates that we are not interested in the events
 * from this device anymore. Delivery of events may or may not stop,
 * depending on the number of users still outstanding.
 */
void hid_hw_close(struct hid_device *hdev)
{
	mutex_lock(&hdev->ll_open_lock);
	/* only the last closer actually closes the transport */
	if (!--hdev->ll_open_count) {
		hdev->ll_driver->close(hdev);

		if (hdev->driver->on_hid_hw_close)
			hdev->driver->on_hid_hw_close(hdev);
	}
	mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);
2463
2464 /**
2465 * hid_hw_request - send report request to device
2466 *
2467 * @hdev: hid device
2468 * @report: report to send
2469 * @reqtype: hid request type
2470 */
hid_hw_request(struct hid_device * hdev,struct hid_report * report,enum hid_class_request reqtype)2471 void hid_hw_request(struct hid_device *hdev,
2472 struct hid_report *report, enum hid_class_request reqtype)
2473 {
2474 if (hdev->ll_driver->request)
2475 return hdev->ll_driver->request(hdev, report, reqtype);
2476
2477 __hid_request(hdev, report, reqtype);
2478 }
2479 EXPORT_SYMBOL_GPL(hid_hw_request);
2480
/*
 * __hid_hw_raw_request - common implementation behind hid_hw_raw_request()
 *
 * Validates the buffer against the transport's size cap, lets HID-BPF
 * filters intercept the request, then forwards it to the transport
 * driver's ->raw_request().
 */
int __hid_hw_raw_request(struct hid_device *hdev,
			 unsigned char reportnum, __u8 *buf,
			 size_t len, enum hid_report_type rtype,
			 enum hid_class_request reqtype,
			 u64 source, bool from_bpf)
{
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
	int ret;

	if (hdev->ll_driver->max_buffer_size)
		max_buffer_size = hdev->ll_driver->max_buffer_size;

	if (len < 1 || len > max_buffer_size || !buf)
		return -EINVAL;

	/* a non-zero return means a BPF program answered (or vetoed) the request */
	ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
					    reqtype, source, from_bpf);
	if (ret)
		return ret;

	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
					    rtype, reqtype);
}
2504
2505 /**
2506 * hid_hw_raw_request - send report request to device
2507 *
2508 * @hdev: hid device
2509 * @reportnum: report ID
2510 * @buf: in/out data to transfer
2511 * @len: length of buf
2512 * @rtype: HID report type
2513 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2514 *
2515 * Return: count of data transferred, negative if error
2516 *
2517 * Same behavior as hid_hw_request, but with raw buffers instead.
2518 */
hid_hw_raw_request(struct hid_device * hdev,unsigned char reportnum,__u8 * buf,size_t len,enum hid_report_type rtype,enum hid_class_request reqtype)2519 int hid_hw_raw_request(struct hid_device *hdev,
2520 unsigned char reportnum, __u8 *buf,
2521 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2522 {
2523 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2524 }
2525 EXPORT_SYMBOL_GPL(hid_hw_raw_request);
2526
/*
 * __hid_hw_output_report - common implementation behind hid_hw_output_report()
 *
 * Validates the buffer against the transport's size cap, lets HID-BPF
 * filters intercept the report, then forwards it to the transport
 * driver's ->output_report() if one is provided.
 */
int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
			   bool from_bpf)
{
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
	int ret;

	if (hdev->ll_driver->max_buffer_size)
		max_buffer_size = hdev->ll_driver->max_buffer_size;

	if (len < 1 || len > max_buffer_size || !buf)
		return -EINVAL;

	/* a non-zero return means a BPF program handled (or vetoed) the report */
	ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
	if (ret)
		return ret;

	if (hdev->ll_driver->output_report)
		return hdev->ll_driver->output_report(hdev, buf, len);

	/* transport has no dedicated output-report channel */
	return -ENOSYS;
}
2548
2549 /**
2550 * hid_hw_output_report - send output report to device
2551 *
2552 * @hdev: hid device
2553 * @buf: raw data to transfer
2554 * @len: length of buf
2555 *
2556 * Return: count of data transferred, negative if error
2557 */
hid_hw_output_report(struct hid_device * hdev,__u8 * buf,size_t len)2558 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2559 {
2560 return __hid_hw_output_report(hdev, buf, len, 0, false);
2561 }
2562 EXPORT_SYMBOL_GPL(hid_hw_output_report);
2563
2564 #ifdef CONFIG_PM
/* Forward a PM suspend event to the bound driver, if it handles one. */
int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
{
	struct hid_driver *hdrv = hdev->driver;

	if (!hdrv || !hdrv->suspend)
		return 0;

	return hdrv->suspend(hdev, state);
}
EXPORT_SYMBOL_GPL(hid_driver_suspend);
2573
hid_driver_reset_resume(struct hid_device * hdev)2574 int hid_driver_reset_resume(struct hid_device *hdev)
2575 {
2576 if (hdev->driver && hdev->driver->reset_resume)
2577 return hdev->driver->reset_resume(hdev);
2578
2579 return 0;
2580 }
2581 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2582
hid_driver_resume(struct hid_device * hdev)2583 int hid_driver_resume(struct hid_device *hdev)
2584 {
2585 if (hdev->driver && hdev->driver->resume)
2586 return hdev->driver->resume(hdev);
2587
2588 return 0;
2589 }
2590 EXPORT_SYMBOL_GPL(hid_driver_resume);
2591 #endif /* CONFIG_PM */
2592
/*
 * A device ID added at runtime through the driver's sysfs "new_id" file,
 * kept on hid_driver::dyn_list under hid_driver::dyn_lock.
 */
struct hid_dynid {
	struct list_head list;
	struct hid_device_id id;
};
2597
/**
 * new_id_store - add a new HID device ID to this driver and re-probe devices
 * @drv: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Adds a new dynamic hid device ID to this driver,
 * and causes the driver to probe for all devices again.
 */
static ssize_t new_id_store(struct device_driver *drv, const char *buf,
			    size_t count)
{
	struct hid_driver *hdrv = to_hid_driver(drv);
	struct hid_dynid *dynid;
	__u32 bus, vendor, product;
	unsigned long driver_data = 0;
	int ret;

	/* driver_data is optional; bus, vendor and product are mandatory */
	ret = sscanf(buf, "%x %x %x %lx",
		     &bus, &vendor, &product, &driver_data);
	if (ret < 3)
		return -EINVAL;

	dynid = kzalloc_obj(*dynid);
	if (!dynid)
		return -ENOMEM;

	dynid->id.bus = bus;
	dynid->id.group = HID_GROUP_ANY;
	dynid->id.vendor = vendor;
	dynid->id.product = product;
	dynid->id.driver_data = driver_data;

	spin_lock(&hdrv->dyn_lock);
	list_add_tail(&dynid->list, &hdrv->dyn_list);
	spin_unlock(&hdrv->dyn_lock);

	/* try to bind currently unbound devices against the new ID */
	ret = driver_attach(&hdrv->driver);

	return ret ? : count;
}
static DRIVER_ATTR_WO(new_id);
2640
/* Driver-level sysfs attributes (currently just "new_id"). */
static struct attribute *hid_drv_attrs[] = {
	&driver_attr_new_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hid_drv);
2646
hid_free_dynids(struct hid_driver * hdrv)2647 static void hid_free_dynids(struct hid_driver *hdrv)
2648 {
2649 struct hid_dynid *dynid, *n;
2650
2651 spin_lock(&hdrv->dyn_lock);
2652 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2653 list_del(&dynid->list);
2654 kfree(dynid);
2655 }
2656 spin_unlock(&hdrv->dyn_lock);
2657 }
2658
hid_match_device(struct hid_device * hdev,struct hid_driver * hdrv)2659 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2660 struct hid_driver *hdrv)
2661 {
2662 struct hid_dynid *dynid;
2663
2664 spin_lock(&hdrv->dyn_lock);
2665 list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2666 if (hid_match_one_id(hdev, &dynid->id)) {
2667 spin_unlock(&hdrv->dyn_lock);
2668 return &dynid->id;
2669 }
2670 }
2671 spin_unlock(&hdrv->dyn_lock);
2672
2673 return hid_match_id(hdev, hdrv->id_table);
2674 }
2675 EXPORT_SYMBOL_GPL(hid_match_device);
2676
hid_bus_match(struct device * dev,const struct device_driver * drv)2677 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2678 {
2679 struct hid_driver *hdrv = to_hid_driver(drv);
2680 struct hid_device *hdev = to_hid_device(dev);
2681
2682 return hid_match_device(hdev, hdrv) != NULL;
2683 }
2684
2685 /**
2686 * hid_compare_device_paths - check if both devices share the same path
2687 * @hdev_a: hid device
2688 * @hdev_b: hid device
2689 * @separator: char to use as separator
2690 *
2691 * Check if two devices share the same path up to the last occurrence of
2692 * the separator char. Both paths must exist (i.e., zero-length paths
2693 * don't match).
2694 */
hid_compare_device_paths(struct hid_device * hdev_a,struct hid_device * hdev_b,char separator)2695 bool hid_compare_device_paths(struct hid_device *hdev_a,
2696 struct hid_device *hdev_b, char separator)
2697 {
2698 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2699 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2700
2701 if (n1 != n2 || n1 <= 0 || n2 <= 0)
2702 return false;
2703
2704 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2705 }
2706 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2707
/*
 * hid_check_device_match - decide whether @hdrv should bind to @hdev
 * @id: on success, set to the matching ID table entry
 *
 * Returns false when no ID matches or when the driver's ->match()
 * callback declines the device.
 */
static bool hid_check_device_match(struct hid_device *hdev,
				   struct hid_driver *hdrv,
				   const struct hid_device_id **id)
{
	*id = hid_match_device(hdev, hdrv);
	if (!*id)
		return false;

	if (hdrv->match)
		return hdrv->match(hdev, hid_ignore_special_drivers);

	/*
	 * hid-generic implements .match(), so we must be dealing with a
	 * different HID driver here, and can simply check if
	 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
	 * are set or not.
	 */
	return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
}
2727
/*
 * hid_set_group - (re)compute hdev->group
 *
 * When hid_ignore_special_drivers is set everything is forced into the
 * generic group; otherwise the report descriptor is scanned, unless a
 * group was already assigned or a special driver is known to exist.
 */
static void hid_set_group(struct hid_device *hdev)
{
	int ret;

	if (hid_ignore_special_drivers) {
		hdev->group = HID_GROUP_GENERIC;
	} else if (!hdev->group &&
		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
		ret = hid_scan_report(hdev);
		if (ret)
			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
	}
}
2741
/*
 * __hid_device_probe - try to bind @hdrv to @hdev
 *
 * Runs the HID-BPF report-descriptor fixup once per device, checks the
 * ID/match criteria, opens a devres group and invokes the driver's probe
 * (or the default open/start sequence). On failure everything is rolled
 * back and the device is left unbound.
 */
static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
{
	const struct hid_device_id *id;
	int ret;

	/* bpf_rsize == 0 means the BPF rdesc fixup has not run yet */
	if (!hdev->bpf_rsize) {
		/* we keep a reference to the currently scanned report descriptor */
		const __u8 *original_rdesc = hdev->bpf_rdesc;

		if (!original_rdesc)
			original_rdesc = hdev->dev_rdesc;

		/* in case a bpf program gets detached, we need to free the old one */
		hid_free_bpf_rdesc(hdev);

		/* keep this around so we know we called it once */
		hdev->bpf_rsize = hdev->dev_rsize;

		/* call_hid_bpf_rdesc_fixup will always return a valid pointer */
		hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
							   &hdev->bpf_rsize);

		/* the report descriptor changed, we need to re-scan it */
		if (original_rdesc != hdev->bpf_rdesc) {
			hdev->group = 0;
			hid_set_group(hdev);
		}
	}

	if (!hid_check_device_match(hdev, hdrv, &id))
		return -ENODEV;

	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
	if (!hdev->devres_group_id)
		return -ENOMEM;

	/* reset the quirks that has been previously set */
	hdev->quirks = hid_lookup_quirk(hdev);
	hdev->driver = hdrv;

	if (hdrv->probe) {
		ret = hdrv->probe(hdev, id);
	} else { /* default probe */
		ret = hid_open_report(hdev);
		if (!ret)
			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	}

	/*
	 * Note that we are not closing the devres group opened above so
	 * even resources that were attached to the device after probe is
	 * run are released when hid_device_remove() is executed. This is
	 * needed as some drivers would allocate additional resources,
	 * for example when updating firmware.
	 */

	if (ret) {
		/* roll back: devres resources, parsed reports and the binding */
		devres_release_group(&hdev->dev, hdev->devres_group_id);
		hid_close_report(hdev);
		hdev->driver = NULL;
	}

	return ret;
}
2806
/*
 * Bus ->probe callback: serialize against input delivery via
 * driver_input_lock, then attempt the actual binding.
 */
static int hid_device_probe(struct device *dev)
{
	struct hid_device *hdev = to_hid_device(dev);
	struct hid_driver *hdrv = to_hid_driver(dev->driver);
	int ret = 0;

	if (down_interruptible(&hdev->driver_input_lock))
		return -EINTR;

	hdev->io_started = false;
	/*
	 * NOTE: ffs() is 1-based, so this clears BIT(ffs(HID_STAT_REPROBED))
	 * rather than HID_STAT_REPROBED itself; the test_and_set_bit() in
	 * __hid_bus_reprobe_drivers() uses the same expression, so both
	 * sides stay consistent with each other.
	 */
	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);

	if (!hdev->driver)
		ret = __hid_device_probe(hdev, hdrv);

	/*
	 * Release the lock unless the driver flagged io_started, in which
	 * case the lock is expected to have been released on its behalf
	 * already.
	 */
	if (!hdev->io_started)
		up(&hdev->driver_input_lock);

	return ret;
}
2827
/*
 * Bus ->remove callback: unbind the driver under driver_input_lock and
 * release everything that was allocated on its behalf during probe.
 */
static void hid_device_remove(struct device *dev)
{
	struct hid_device *hdev = to_hid_device(dev);
	struct hid_driver *hdrv;

	down(&hdev->driver_input_lock);
	hdev->io_started = false;

	hdrv = hdev->driver;
	if (hdrv) {
		if (hdrv->remove)
			hdrv->remove(hdev);
		else /* default remove */
			hid_hw_stop(hdev);

		/* Release all devres resources allocated by the driver */
		devres_release_group(&hdev->dev, hdev->devres_group_id);

		hid_close_report(hdev);
		hdev->driver = NULL;
	}

	/* see the matching io_started comment in hid_device_probe() logic */
	if (!hdev->io_started)
		up(&hdev->driver_input_lock);
}
2853
modalias_show(struct device * dev,struct device_attribute * a,char * buf)2854 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2855 char *buf)
2856 {
2857 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2858
2859 return sysfs_emit(buf, "hid:b%04Xg%04Xv%08Xp%08X\n",
2860 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2861 }
2862 static DEVICE_ATTR_RO(modalias);
2863
/* Default sysfs attributes present on every HID device. */
static struct attribute *hid_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
/* Binary attributes: the raw report descriptor dump. */
static const struct bin_attribute *hid_dev_bin_attrs[] = {
	&bin_attr_report_descriptor,
	NULL
};
static const struct attribute_group hid_dev_group = {
	.attrs = hid_dev_attrs,
	.bin_attrs = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);
2877
/* Bus ->uevent callback: export identification variables to userspace. */
static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct hid_device *hdev = to_hid_device(dev);

	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
			   hdev->bus, hdev->vendor, hdev->product))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
		return -ENOMEM;
	/* only emitted when the transport reported a firmware version */
	if (hdev->firmware_version) {
		if (add_uevent_var(env, "HID_FIRMWARE_VERSION=0x%04llX",
				   hdev->firmware_version))
			return -ENOMEM;
	}

	return 0;
}
2906
/* The HID bus: routes device/driver matching, probe, remove and uevents. */
const struct bus_type hid_bus_type = {
	.name = "hid",
	.dev_groups = hid_dev_groups,
	.drv_groups = hid_drv_groups,
	.match = hid_bus_match,
	.probe = hid_device_probe,
	.remove = hid_device_remove,
	.uevent = hid_uevent,
};
EXPORT_SYMBOL(hid_bus_type);
2917
/**
 * hid_add_device - register an allocated HID device with the bus
 * @hdev: device previously obtained from hid_allocate_device()
 *
 * Looks up quirks, parses the report descriptor through the transport
 * driver, determines the device group, assigns a unique name and adds
 * the device so that drivers can bind to it.
 */
int hid_add_device(struct hid_device *hdev)
{
	static atomic_t id = ATOMIC_INIT(0);
	int ret;

	if (WARN_ON(hdev->status & HID_STAT_ADDED))
		return -EBUSY;

	hdev->quirks = hid_lookup_quirk(hdev);

	/* we need to kill them here, otherwise they will stay allocated to
	 * wait for coming driver */
	if (hid_ignore(hdev))
		return -ENODEV;

	/*
	 * Check for the mandatory transport channel.
	 */
	if (!hdev->ll_driver->raw_request) {
		hid_err(hdev, "transport driver missing .raw_request()\n");
		return -EINVAL;
	}

	/*
	 * Read the device report descriptor once and use as template
	 * for the driver-specific modifications.
	 */
	ret = hdev->ll_driver->parse(hdev);
	if (ret)
		return ret;
	if (!hdev->dev_rdesc)
		return -ENODEV;

	/*
	 * Scan generic devices for group information
	 */
	hid_set_group(hdev);

	hdev->id = atomic_inc_return(&id);

	/* XXX hack, any other cleaner solution after the driver core
	 * is converted to allow more than 20 bytes as the device name? */
	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
		     hdev->vendor, hdev->product, hdev->id);

	hid_debug_register(hdev, dev_name(&hdev->dev));
	ret = device_add(&hdev->dev);
	if (!ret)
		hdev->status |= HID_STAT_ADDED;
	else
		hid_debug_unregister(hdev);

	return ret;
}
EXPORT_SYMBOL_GPL(hid_add_device);
2973
/**
 * hid_allocate_device - allocate new hid device descriptor
 *
 * Allocate and initialize hid device, so that hid_destroy_device might be
 * used to free it.
 *
 * New hid_device pointer is returned on success, otherwise ERR_PTR encoded
 * error value.
 */
struct hid_device *hid_allocate_device(void)
{
	struct hid_device *hdev;
	int ret = -ENOMEM;

	hdev = kzalloc_obj(*hdev);
	if (hdev == NULL)
		return ERR_PTR(ret);

	device_initialize(&hdev->dev);
	hdev->dev.release = hid_device_release;
	hdev->dev.bus = &hid_bus_type;
	device_enable_async_suspend(&hdev->dev);

	/* puts the freshly allocated device into a clean report state */
	hid_close_report(hdev);

	init_waitqueue_head(&hdev->debug_wait);
	INIT_LIST_HEAD(&hdev->debug_list);
	spin_lock_init(&hdev->debug_list_lock);
	sema_init(&hdev->driver_input_lock, 1);
	mutex_init(&hdev->ll_open_lock);
	kref_init(&hdev->ref);

#ifdef CONFIG_HID_BATTERY_STRENGTH
	INIT_LIST_HEAD(&hdev->batteries);
#endif

	ret = hid_bpf_device_init(hdev);
	if (ret)
		goto out_err;

	return hdev;

out_err:
	/* hid_destroy_device() copes with a partially initialized device */
	hid_destroy_device(hdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hid_allocate_device);
3021
/* Undo hid_add_device(): unregister and drop the report-descriptor buffers. */
static void hid_remove_device(struct hid_device *hdev)
{
	if (hdev->status & HID_STAT_ADDED) {
		device_del(&hdev->dev);
		hid_debug_unregister(hdev);
		hdev->status &= ~HID_STAT_ADDED;
	}
	hid_free_bpf_rdesc(hdev);
	kfree(hdev->dev_rdesc);
	hdev->dev_rdesc = NULL;
	hdev->dev_rsize = 0;
	hdev->bpf_rsize = 0;
}
3035
/**
 * hid_destroy_device - free previously allocated device
 *
 * @hdev: hid device
 *
 * A hid_device allocated through hid_allocate_device must only ever be
 * freed with this function.
 */
void hid_destroy_device(struct hid_device *hdev)
{
	hid_bpf_destroy_device(hdev);
	hid_remove_device(hdev);
	/* the final reference drop triggers hdev->dev.release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL_GPL(hid_destroy_device);
3051
3052
/*
 * Re-probe @dev if it is currently bound to the hid_driver passed in
 * @data but that driver's ->match() no longer accepts it. The REPROBED
 * status bit ensures each device is re-probed at most once.
 */
static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
{
	struct hid_driver *hdrv = data;
	struct hid_device *hdev = to_hid_device(dev);

	if (hdev->driver == hdrv &&
	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
		return device_reprobe(dev);

	return 0;
}
3065
__hid_bus_driver_added(struct device_driver * drv,void * data)3066 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3067 {
3068 struct hid_driver *hdrv = to_hid_driver(drv);
3069
3070 if (hdrv->match) {
3071 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3072 __hid_bus_reprobe_drivers);
3073 }
3074
3075 return 0;
3076 }
3077
/* After a driver is removed, rescan the bus so other drivers may bind. */
static int __bus_removed_driver(struct device_driver *drv, void *data)
{
	return bus_rescan_devices(&hid_bus_type);
}
3082
__hid_register_driver(struct hid_driver * hdrv,struct module * owner,const char * mod_name)3083 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3084 const char *mod_name)
3085 {
3086 int ret;
3087
3088 hdrv->driver.name = hdrv->name;
3089 hdrv->driver.bus = &hid_bus_type;
3090 hdrv->driver.owner = owner;
3091 hdrv->driver.mod_name = mod_name;
3092
3093 INIT_LIST_HEAD(&hdrv->dyn_list);
3094 spin_lock_init(&hdrv->dyn_lock);
3095
3096 ret = driver_register(&hdrv->driver);
3097
3098 if (ret == 0)
3099 bus_for_each_drv(&hid_bus_type, NULL, NULL,
3100 __hid_bus_driver_added);
3101
3102 return ret;
3103 }
3104 EXPORT_SYMBOL_GPL(__hid_register_driver);
3105
/*
 * hid_unregister_driver - remove @hdrv from the HID bus
 *
 * Unregisters the driver, frees its dynamic (sysfs-added) IDs and rescans
 * the bus so remaining drivers can pick up the devices it was bound to.
 */
void hid_unregister_driver(struct hid_driver *hdrv)
{
	driver_unregister(&hdrv->driver);
	hid_free_dynids(hdrv);

	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);
3114
hid_check_keys_pressed(struct hid_device * hid)3115 int hid_check_keys_pressed(struct hid_device *hid)
3116 {
3117 struct hid_input *hidinput;
3118 int i;
3119
3120 if (!(hid->claimed & HID_CLAIMED_INPUT))
3121 return 0;
3122
3123 list_for_each_entry(hidinput, &hid->inputs, list) {
3124 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3125 if (hidinput->input->key[i])
3126 return 1;
3127 }
3128
3129 return 0;
3130 }
3131 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3132
3133 #ifdef CONFIG_HID_BPF
/* Callback table handed to the HID-BPF core (entry points back into hid-core). */
static const struct hid_ops __hid_ops = {
	.hid_get_report = hid_get_report,
	.hid_hw_raw_request = __hid_hw_raw_request,
	.hid_hw_output_report = __hid_hw_output_report,
	.hid_input_report = __hid_input_report,
	.owner = THIS_MODULE,
	.bus_type = &hid_bus_type,
};
3142 #endif
3143
/*
 * Module init: register the HID bus, install the HID-BPF ops table,
 * then bring up hidraw and the debugfs support.
 */
static int __init hid_init(void)
{
	int ret;

	ret = bus_register(&hid_bus_type);
	if (ret) {
		pr_err("can't register hid bus\n");
		goto err;
	}

#ifdef CONFIG_HID_BPF
	hid_ops = &__hid_ops;
#endif

	ret = hidraw_init();
	if (ret)
		goto err_bus;

	hid_debug_init();

	return 0;
err_bus:
	bus_unregister(&hid_bus_type);
err:
	return ret;
}
3170
/* Module exit: undo hid_init() in reverse order, then drop all quirks. */
static void __exit hid_exit(void)
{
#ifdef CONFIG_HID_BPF
	hid_ops = NULL;
#endif
	hid_debug_exit();
	hidraw_exit();
	bus_unregister(&hid_bus_type);
	hid_quirks_exit(HID_BUS_ANY);
}

module_init(hid_init);
module_exit(hid_exit);
3184
3185 MODULE_AUTHOR("Andreas Gal");
3186 MODULE_AUTHOR("Vojtech Pavlik");
3187 MODULE_AUTHOR("Jiri Kosina");
3188 MODULE_DESCRIPTION("HID support for Linux");
3189 MODULE_LICENSE("GPL");
3190