1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * HID support for Linux
4 *
5 * Copyright (c) 1999 Andreas Gal
6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8 * Copyright (c) 2006-2012 Jiri Kosina
9 */
10
11 /*
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35
36 #include "hid-ids.h"
37
38 /*
39 * Version Information
40 */
41
42 #define DRIVER_DESC "HID core driver"
43
/*
 * When non-zero, bind every device to the generic driver instead of any
 * device-specific HID driver; runtime-tunable via module parameter (0600).
 */
static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
47
48 /*
49 * Convert a signed n-bit integer to signed 32-bit integer.
50 */
51
snto32(__u32 value,unsigned int n)52 static s32 snto32(__u32 value, unsigned int n)
53 {
54 if (!value || !n)
55 return 0;
56
57 if (n > 32)
58 n = 32;
59
60 return sign_extend32(value, n - 1);
61 }
62
63 /*
64 * Convert a signed 32-bit integer to a signed n-bit integer.
65 */
66
s32ton(__s32 value,unsigned int n)67 static u32 s32ton(__s32 value, unsigned int n)
68 {
69 s32 a;
70
71 if (!value || !n)
72 return 0;
73
74 if (n > 32)
75 n = 32;
76
77 a = value >> (n - 1);
78 if (a && a != -1)
79 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
80 return value & ((1 << n) - 1);
81 }
82
83 /*
84 * Register a new report for a device.
85 */
86
hid_register_report(struct hid_device * device,enum hid_report_type type,unsigned int id,unsigned int application)87 struct hid_report *hid_register_report(struct hid_device *device,
88 enum hid_report_type type, unsigned int id,
89 unsigned int application)
90 {
91 struct hid_report_enum *report_enum = device->report_enum + type;
92 struct hid_report *report;
93
94 if (id >= HID_MAX_IDS)
95 return NULL;
96 if (report_enum->report_id_hash[id])
97 return report_enum->report_id_hash[id];
98
99 report = kzalloc_obj(struct hid_report);
100 if (!report)
101 return NULL;
102
103 if (id != 0)
104 report_enum->numbered = 1;
105
106 report->id = id;
107 report->type = type;
108 report->size = 0;
109 report->device = device;
110 report->application = application;
111 report_enum->report_id_hash[id] = report;
112
113 list_add_tail(&report->list, &report_enum->report_list);
114 INIT_LIST_HEAD(&report->field_entry_list);
115
116 return report;
117 }
118 EXPORT_SYMBOL_GPL(hid_register_report);
119
120 /*
121 * Register a new field for this report.
122 */
123
hid_register_field(struct hid_report * report,unsigned usages)124 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
125 {
126 struct hid_field *field;
127
128 if (report->maxfield == HID_MAX_FIELDS) {
129 hid_err(report->device, "too many fields in report\n");
130 return NULL;
131 }
132
133 field = kvzalloc((sizeof(struct hid_field) +
134 usages * sizeof(struct hid_usage) +
135 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
136 if (!field)
137 return NULL;
138
139 field->index = report->maxfield++;
140 report->field[field->index] = field;
141 field->usage = (struct hid_usage *)(field + 1);
142 field->value = (s32 *)(field->usage + usages);
143 field->new_value = (s32 *)(field->value + usages);
144 field->usages_priorities = (s32 *)(field->new_value + usages);
145 field->report = report;
146
147 return field;
148 }
149
/*
 * Open a collection. The type/usage is pushed on the stack.
 *
 * Both the parser's open-collection stack and the device's collection
 * array are grown on demand.  Returns 0 on success, -ENOMEM on
 * allocation failure.
 */
static int open_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection;
	unsigned usage;
	int collection_index;

	usage = parser->local.usage[0];

	/* Grow the stack in HID_COLLECTION_STACK_SIZE increments. */
	if (parser->collection_stack_ptr == parser->collection_stack_size) {
		unsigned int *collection_stack;
		unsigned int new_size = parser->collection_stack_size +
					HID_COLLECTION_STACK_SIZE;

		collection_stack = krealloc(parser->collection_stack,
					    new_size * sizeof(unsigned int),
					    GFP_KERNEL);
		if (!collection_stack)
			return -ENOMEM;

		parser->collection_stack = collection_stack;
		parser->collection_stack_size = new_size;
	}

	/* Double the device's collection array when full. */
	if (parser->device->maxcollection == parser->device->collection_size) {
		collection = kmalloc(
				array3_size(sizeof(struct hid_collection),
					    parser->device->collection_size,
					    2),
				GFP_KERNEL);
		if (collection == NULL) {
			hid_err(parser->device, "failed to reallocate collection array\n");
			return -ENOMEM;
		}
		/* Copy the existing entries; zero the new second half. */
		memcpy(collection, parser->device->collection,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		memset(collection + parser->device->collection_size, 0,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		kfree(parser->device->collection);
		parser->device->collection = collection;
		parser->device->collection_size *= 2;
	}

	parser->collection_stack[parser->collection_stack_ptr++] =
		parser->device->maxcollection;

	collection_index = parser->device->maxcollection++;
	collection = parser->device->collection + collection_index;
	collection->type = type;
	collection->usage = usage;
	collection->level = parser->collection_stack_ptr - 1;
	/* Top-level collections have no parent, marked with -1. */
	collection->parent_idx = (collection->level == 0) ? -1 :
		parser->collection_stack[collection->level - 1];

	if (type == HID_COLLECTION_APPLICATION)
		parser->device->maxapplication++;

	return 0;
}
214
215 /*
216 * Close a collection.
217 */
218
close_collection(struct hid_parser * parser)219 static int close_collection(struct hid_parser *parser)
220 {
221 if (!parser->collection_stack_ptr) {
222 hid_err(parser->device, "collection stack underflow\n");
223 return -EINVAL;
224 }
225 parser->collection_stack_ptr--;
226 return 0;
227 }
228
229 /*
230 * Climb up the stack, search for the specified collection type
231 * and return the usage.
232 */
233
hid_lookup_collection(struct hid_parser * parser,unsigned type)234 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
235 {
236 struct hid_collection *collection = parser->device->collection;
237 int n;
238
239 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
240 unsigned index = parser->collection_stack[n];
241 if (collection[index].type == type)
242 return collection[index].usage;
243 }
244 return 0; /* we know nothing about this usage type */
245 }
246
247 /*
248 * Concatenate usage which defines 16 bits or less with the
249 * currently defined usage page to form a 32 bit usage
250 */
251
complete_usage(struct hid_parser * parser,unsigned int index)252 static void complete_usage(struct hid_parser *parser, unsigned int index)
253 {
254 parser->local.usage[index] &= 0xFFFF;
255 parser->local.usage[index] |=
256 (parser->global.usage_page & 0xFFFF) << 16;
257 }
258
259 /*
260 * Add a usage to the temporary parser table.
261 */
262
hid_add_usage(struct hid_parser * parser,unsigned usage,u8 size)263 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
264 {
265 if (parser->local.usage_index >= HID_MAX_USAGES) {
266 hid_err(parser->device, "usage index exceeded\n");
267 return -1;
268 }
269 parser->local.usage[parser->local.usage_index] = usage;
270
271 /*
272 * If Usage item only includes usage id, concatenate it with
273 * currently defined usage page
274 */
275 if (size <= 2)
276 complete_usage(parser, parser->local.usage_index);
277
278 parser->local.usage_size[parser->local.usage_index] = size;
279 parser->local.collection_index[parser->local.usage_index] =
280 parser->collection_stack_ptr ?
281 parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
282 parser->local.usage_index++;
283 return 0;
284 }
285
/*
 * Register a new field for this report.
 *
 * Creates (or reuses) the report for the current global report id,
 * validates the logical range and total report size, then allocates a
 * hid_field populated from the accumulated global and local parser
 * state.  Padding fields (no usages) only grow the report size.
 * Returns 0 on success, -1 on error.
 */
static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
{
	struct hid_report *report;
	struct hid_field *field;
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
	unsigned int usages;
	unsigned int offset;
	unsigned int i;
	unsigned int application;

	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);

	report = hid_register_report(parser->device, report_type,
				     parser->global.report_id, application);
	if (!report) {
		hid_err(parser->device, "hid_register_report failed\n");
		return -1;
	}

	/* Handle both signed and unsigned cases properly */
	if ((parser->global.logical_minimum < 0 &&
	     parser->global.logical_maximum <
	     parser->global.logical_minimum) ||
	    (parser->global.logical_minimum >= 0 &&
	     (__u32)parser->global.logical_maximum <
	     (__u32)parser->global.logical_minimum)) {
		dbg_hid("logical range invalid 0x%x 0x%x\n",
			parser->global.logical_minimum,
			parser->global.logical_maximum);
		return -1;
	}

	offset = report->size;
	report->size += parser->global.report_size * parser->global.report_count;

	/* Low-level drivers may cap transfers below HID_MAX_BUFFER_SIZE. */
	if (parser->device->ll_driver->max_buffer_size)
		max_buffer_size = parser->device->ll_driver->max_buffer_size;

	/* Total size check: Allow for possible report index byte */
	if (report->size > (max_buffer_size - 1) << 3) {
		hid_err(parser->device, "report is too long\n");
		return -1;
	}

	if (!parser->local.usage_index) /* Ignore padding fields */
		return 0;

	usages = max_t(unsigned, parser->local.usage_index,
		       parser->global.report_count);

	field = hid_register_field(report, usages);
	if (!field)
		return 0;

	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
	field->application = application;

	for (i = 0; i < usages; i++) {
		unsigned j = i;
		/* Duplicate the last usage we parsed if we have excess values */
		if (i >= parser->local.usage_index)
			j = parser->local.usage_index - 1;
		field->usage[i].hid = parser->local.usage[j];
		field->usage[i].collection_index =
			parser->local.collection_index[j];
		field->usage[i].usage_index = i;
		field->usage[i].resolution_multiplier = 1;
	}

	/* Snapshot the remaining global parser state into the field. */
	field->maxusage = usages;
	field->flags = flags;
	field->report_offset = offset;
	field->report_type = report_type;
	field->report_size = parser->global.report_size;
	field->report_count = parser->global.report_count;
	field->logical_minimum = parser->global.logical_minimum;
	field->logical_maximum = parser->global.logical_maximum;
	field->physical_minimum = parser->global.physical_minimum;
	field->physical_maximum = parser->global.physical_maximum;
	field->unit_exponent = parser->global.unit_exponent;
	field->unit = parser->global.unit;

	return 0;
}
375
376 /*
377 * Read data value from item.
378 */
379
item_udata(struct hid_item * item)380 static u32 item_udata(struct hid_item *item)
381 {
382 switch (item->size) {
383 case 1: return item->data.u8;
384 case 2: return item->data.u16;
385 case 4: return item->data.u32;
386 }
387 return 0;
388 }
389
item_sdata(struct hid_item * item)390 static s32 item_sdata(struct hid_item *item)
391 {
392 switch (item->size) {
393 case 1: return item->data.s8;
394 case 2: return item->data.s16;
395 case 4: return item->data.s32;
396 }
397 return 0;
398 }
399
/*
 * Process a global item.
 *
 * Global items set parser-wide state (usage page, logical/physical
 * ranges, unit, report size/count/id) that remains in effect until
 * changed, and can be saved/restored with PUSH/POP.  Returns 0 on
 * success, -1 on any malformed or unknown item.
 */
static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
	__s32 raw_value;
	switch (item->tag) {
	case HID_GLOBAL_ITEM_TAG_PUSH:

		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
			hid_err(parser->device, "global environment stack overflow\n");
			return -1;
		}

		memcpy(parser->global_stack + parser->global_stack_ptr++,
		       &parser->global, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_POP:

		if (!parser->global_stack_ptr) {
			hid_err(parser->device, "global environment stack underflow\n");
			return -1;
		}

		memcpy(&parser->global, parser->global_stack +
			--parser->global_stack_ptr, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
		parser->global.usage_page = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
		parser->global.logical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
		/* Read the maximum as unsigned unless the minimum is negative. */
		if (parser->global.logical_minimum < 0)
			parser->global.logical_maximum = item_sdata(item);
		else
			parser->global.logical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
		parser->global.physical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
		/* Same signedness rule as the logical maximum above. */
		if (parser->global.physical_minimum < 0)
			parser->global.physical_maximum = item_sdata(item);
		else
			parser->global.physical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
		/* Many devices provide unit exponent as a two's complement
		 * nibble due to the common misunderstanding of HID
		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
		 * both this and the standard encoding. */
		raw_value = item_sdata(item);
		if (!(raw_value & 0xfffffff0))
			parser->global.unit_exponent = snto32(raw_value, 4);
		else
			parser->global.unit_exponent = raw_value;
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT:
		parser->global.unit = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
		parser->global.report_size = item_udata(item);
		if (parser->global.report_size > 256) {
			hid_err(parser->device, "invalid report_size %d\n",
				parser->global.report_size);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
		parser->global.report_count = item_udata(item);
		if (parser->global.report_count > HID_MAX_USAGES) {
			hid_err(parser->device, "invalid report_count %d\n",
				parser->global.report_count);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
		parser->global.report_id = item_udata(item);
		/* Report id 0 is reserved; ids are bounded by HID_MAX_IDS. */
		if (parser->global.report_id == 0 ||
		    parser->global.report_id >= HID_MAX_IDS) {
			hid_err(parser->device, "report_id %u is invalid\n",
				parser->global.report_id);
			return -1;
		}
		return 0;

	default:
		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
		return -1;
	}
}
505
/*
 * Process a local item.
 *
 * Local items (usages, usage ranges, delimiters) only apply to the next
 * main item and are cleared by hid_parser_main() afterwards.  Returns 0
 * on success, -1 on a malformed item.
 */
static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	unsigned n;
	__u32 count;

	data = item_udata(item);

	switch (item->tag) {
	case HID_LOCAL_ITEM_TAG_DELIMITER:

		if (data) {
			/*
			 * We treat items before the first delimiter
			 * as global to all usage sets (branch 0).
			 * In the moment we process only these global
			 * items and the first delimiter set.
			 */
			if (parser->local.delimiter_depth != 0) {
				hid_err(parser->device, "nested delimiters\n");
				return -1;
			}
			parser->local.delimiter_depth++;
			parser->local.delimiter_branch++;
		} else {
			if (parser->local.delimiter_depth < 1) {
				hid_err(parser->device, "bogus close delimiter\n");
				return -1;
			}
			parser->local.delimiter_depth--;
		}
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE:

		/* Only the first delimited usage set is honoured. */
		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		return hid_add_usage(parser, data, item->size);

	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		parser->local.usage_minimum = data;
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		/* Clamp the range so the usage table cannot overflow. */
		count = data - parser->local.usage_minimum;
		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
			/*
			 * We do not warn if the name is not set, we are
			 * actually pre-scanning the device.
			 */
			if (dev_name(&parser->device->dev))
				hid_warn(parser->device,
					 "ignoring exceeding usage max\n");
			data = HID_MAX_USAGES - parser->local.usage_index +
				parser->local.usage_minimum - 1;
			/*
			 * NOTE(review): data is __u32, so "data <= 0" only
			 * catches data == 0; a negative computation wraps to
			 * a large positive value instead — confirm whether a
			 * signed check was intended here.
			 */
			if (data <= 0) {
				hid_err(parser->device,
					"no more usage index available\n");
				return -1;
			}
		}

		/* Add one usage for every id in [usage_minimum, data]. */
		for (n = parser->local.usage_minimum; n <= data; n++)
			if (hid_add_usage(parser, n, item->size)) {
				dbg_hid("hid_add_usage failed\n");
				return -1;
			}
		return 0;

	default:

		dbg_hid("unknown local item tag 0x%x\n", item->tag);
		return 0;
	}
	return 0;
}
601
602 /*
603 * Concatenate Usage Pages into Usages where relevant:
604 * As per specification, 6.2.2.8: "When the parser encounters a main item it
605 * concatenates the last declared Usage Page with a Usage to form a complete
606 * usage value."
607 */
608
hid_concatenate_last_usage_page(struct hid_parser * parser)609 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
610 {
611 int i;
612 unsigned int usage_page;
613 unsigned int current_page;
614
615 if (!parser->local.usage_index)
616 return;
617
618 usage_page = parser->global.usage_page;
619
620 /*
621 * Concatenate usage page again only if last declared Usage Page
622 * has not been already used in previous usages concatenation
623 */
624 for (i = parser->local.usage_index - 1; i >= 0; i--) {
625 if (parser->local.usage_size[i] > 2)
626 /* Ignore extended usages */
627 continue;
628
629 current_page = parser->local.usage[i] >> 16;
630 if (current_page == usage_page)
631 break;
632
633 complete_usage(parser, i);
634 }
635 }
636
/*
 * Process a main item.
 *
 * Main items consume the accumulated local state (which is reset on
 * exit) and either open/close a collection or add a field to an
 * input/output/feature report.  Reserved and unknown tags are warned
 * about but not treated as errors.
 */
static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int ret;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		ret = open_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		ret = close_collection(parser);
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
		break;
	default:
		if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
		    item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
			hid_warn_ratelimited(parser->device, "reserved main item tag 0x%x\n", item->tag);
		else
			hid_warn_ratelimited(parser->device, "unknown main item tag 0x%x\n", item->tag);
		ret = 0;
	}

	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */

	return ret;
}
679
/*
 * Process a reserved item.  Nothing to do beyond a debug trace; the
 * item is silently skipped.
 */
static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
{
	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
	return 0;
}
689
690 /*
691 * Free a report and all registered fields. The field->usage and
692 * field->value table's are allocated behind the field, so we need
693 * only to free(field) itself.
694 */
695
hid_free_report(struct hid_report * report)696 static void hid_free_report(struct hid_report *report)
697 {
698 unsigned n;
699
700 kfree(report->field_entries);
701
702 for (n = 0; n < report->maxfield; n++)
703 kvfree(report->field[n]);
704 kfree(report);
705 }
706
/*
 * Close report. This function returns the device
 * state to the point prior to hid_open_report().
 */
static void hid_close_report(struct hid_device *device)
{
	unsigned i, j;

	/* Free every registered report of every report type. */
	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = device->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];
			if (report)
				hid_free_report(report);
		}
		memset(report_enum, 0, sizeof(*report_enum));
		INIT_LIST_HEAD(&report_enum->report_list);
	}

	/*
	 * If the HID driver had a rdesc_fixup() callback, dev->rdesc
	 * will be allocated by hid-core and needs to be freed.
	 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
	 * which cases it'll be freed later on device removal or destroy.
	 */
	if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
		kfree(device->rdesc);
	device->rdesc = NULL;
	device->rsize = 0;

	/* Drop the collection array and the counters derived from it. */
	kfree(device->collection);
	device->collection = NULL;
	device->collection_size = 0;
	device->maxcollection = 0;
	device->maxapplication = 0;

	device->status &= ~HID_STAT_PARSED;
}
746
hid_free_bpf_rdesc(struct hid_device * hdev)747 static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
748 {
749 /* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
750 if (hdev->bpf_rdesc != hdev->dev_rdesc)
751 kfree(hdev->bpf_rdesc);
752 hdev->bpf_rdesc = NULL;
753 }
754
/*
 * Free a device structure, all reports, and all fields.
 *
 * kref release callback: tear down parsed state first, then the raw
 * descriptors, then the device structure itself.
 */
void hiddev_free(struct kref *ref)
{
	struct hid_device *hid = container_of(ref, struct hid_device, ref);

	hid_close_report(hid);
	hid_free_bpf_rdesc(hid);
	kfree(hid->dev_rdesc);
	kfree(hid);
}
768
hid_device_release(struct device * dev)769 static void hid_device_release(struct device *dev)
770 {
771 struct hid_device *hid = to_hid_device(dev);
772
773 kref_put(&hid->ref, hiddev_free);
774 }
775
/*
 * Fetch a report description item from the data stream. We support long
 * items, though they are not used yet.
 *
 * Returns a pointer just past the consumed item, or NULL when the
 * stream ends before a complete item could be read.
 */
static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
{
	u8 b;

	if ((end - start) <= 0)
		return NULL;

	b = *start++;

	/* Prefix byte: bits 2-3 hold the type, bits 4-7 the tag. */
	item->type = (b >> 2) & 3;
	item->tag = (b >> 4) & 15;

	if (item->tag == HID_ITEM_TAG_LONG) {

		item->format = HID_ITEM_FORMAT_LONG;

		/* Long items carry an explicit size byte and tag byte. */
		if ((end - start) < 2)
			return NULL;

		item->size = *start++;
		item->tag  = *start++;

		if ((end - start) < item->size)
			return NULL;

		item->data.longdata = start;
		start += item->size;
		return start;
	}

	item->format = HID_ITEM_FORMAT_SHORT;
	item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */

	if (end - start < item->size)
		return NULL;

	/* Short item payloads are little-endian and may be unaligned. */
	switch (item->size) {
	case 0:
		break;

	case 1:
		item->data.u8 = *start;
		break;

	case 2:
		item->data.u16 = get_unaligned_le16(start);
		break;

	case 4:
		item->data.u32 = get_unaligned_le32(start);
		break;
	}

	return start + item->size;
}
836
/*
 * Pre-scan hook for input usages: a Contact ID usage marks the device
 * as a multitouch device.
 */
static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
{
	if (usage == HID_DG_CONTACTID)
		parser->device->group = HID_GROUP_MULTITOUCH;
}
844
/*
 * Pre-scan hook for feature usages: recognise the two vendor-defined
 * feature reports that identify a Win8 certified multitouch device.
 */
static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
{
	bool win8_blob = usage == 0xff0000c5 &&
			 parser->global.report_count == 256 &&
			 parser->global.report_size == 8;
	bool win8_latency = usage == 0xff0000c6 &&
			    parser->global.report_count == 1 &&
			    parser->global.report_size == 8;

	if (win8_blob || win8_latency)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}
855
/*
 * Pre-scan hook for collections: derive the device group and scan
 * flags from the collection's type and the current usage page.
 */
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_device *hid = parser->device;
	int i;

	/* Sensor-page collections select the sensor-hub device group. */
	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
	    (type == HID_COLLECTION_PHYSICAL ||
	     type == HID_COLLECTION_APPLICATION))
		hid->group = HID_GROUP_SENSOR_HUB;

	/* The MS Power Cover looks like multitouch but must stay generic. */
	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
	    hid->group == HID_GROUP_MULTITOUCH)
		hid->group = HID_GROUP_GENERIC;

	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] == HID_GD_POINTER)
				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;

	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;

	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] ==
			    (HID_UP_GOOGLEVENDOR | 0x0001))
				parser->device->group =
					HID_GROUP_VIVALDI;
}
886
/*
 * Process a main item during the pre-scan pass.  Collections and
 * input/feature usages feed the group/flag heuristics above; output
 * items are ignored.  Always succeeds.
 */
static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int i;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		hid_scan_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		/* ignore constant inputs, they will be ignored by hid-input */
		if (data & HID_MAIN_ITEM_CONSTANT)
			break;
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_input_usage(parser, parser->local.usage[i]);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_feature_usage(parser, parser->local.usage[i]);
		break;
	}

	/* Reset the local parser environment */
	memset(&parser->local, 0, sizeof(parser->local));

	return 0;
}
922
/*
 * Scan a report descriptor before the device is added to the bus.
 * Sets device groups and other properties that determine what driver
 * to load.
 */
static int hid_scan_report(struct hid_device *hid)
{
	struct hid_item item;
	const __u8 *start = hid->dev_rdesc;
	const __u8 *end = start + hid->dev_rsize;
	/* Item dispatch indexed by item->type (main/global/local/reserved). */
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_scan_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	/* The parser itself is freed automatically on every return path. */
	struct hid_parser *parser __free(kvfree) = vzalloc(sizeof(*parser));
	if (!parser)
		return -ENOMEM;

	parser->device = hid;
	hid->group = HID_GROUP_GENERIC;

	/*
	 * In case we are re-scanning after a BPF has been loaded,
	 * we need to use the bpf report descriptor, not the original one.
	 */
	if (hid->bpf_rdesc && hid->bpf_rsize) {
		start = hid->bpf_rdesc;
		end = start + hid->bpf_rsize;
	}

	/*
	 * The parsing is simpler than the one in hid_open_report() as we should
	 * be robust against hid errors. Those errors will be raised by
	 * hid_open_report() anyway.
	 */
	while ((start = fetch_item(start, end, &item)) != NULL)
		dispatch_type[item.type](parser, &item);

	/*
	 * Handle special flags set during scanning.
	 */
	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
	    (hid->group == HID_GROUP_MULTITOUCH))
		hid->group = HID_GROUP_MULTITOUCH_WIN_8;

	/*
	 * Vendor specific handlings
	 */
	switch (hid->vendor) {
	case USB_VENDOR_ID_WACOM:
		hid->group = HID_GROUP_WACOM;
		break;
	case USB_VENDOR_ID_SYNAPTICS:
		if (hid->group == HID_GROUP_GENERIC)
			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
				/*
				 * hid-rmi should take care of them,
				 * not hid-generic
				 */
				hid->group = HID_GROUP_RMI;
		break;
	}

	/* The __free() above only covers the parser, not its stack. */
	kfree(parser->collection_stack);
	return 0;
}
994
995 /**
996 * hid_parse_report - parse device report
997 *
998 * @hid: hid device
999 * @start: report start
1000 * @size: report size
1001 *
1002 * Allocate the device report as read by the bus driver. This function should
1003 * only be called from parse() in ll drivers.
1004 */
hid_parse_report(struct hid_device * hid,const __u8 * start,unsigned size)1005 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
1006 {
1007 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
1008 if (!hid->dev_rdesc)
1009 return -ENOMEM;
1010 hid->dev_rsize = size;
1011 return 0;
1012 }
1013 EXPORT_SYMBOL_GPL(hid_parse_report);
1014
/* Human-readable names indexed by enum hid_report_type, for error messages. */
static const char * const hid_report_names[] = {
	"HID_INPUT_REPORT",
	"HID_OUTPUT_REPORT",
	"HID_FEATURE_REPORT",
};
/**
 * hid_validate_values - validate existing device report's value indexes
 *
 * @hid: hid device
 * @type: which report type to examine
 * @id: which report ID to examine (0 for first)
 * @field_index: which report field to examine
 * @report_counts: expected number of values
 *
 * Validate the number of values in a given field of a given report, after
 * parsing.
 *
 * Return: the validated report, or NULL if the type/id is invalid, the
 * report is missing, or the field has fewer values than expected.
 */
struct hid_report *hid_validate_values(struct hid_device *hid,
				       enum hid_report_type type, unsigned int id,
				       unsigned int field_index,
				       unsigned int report_counts)
{
	struct hid_report *report;

	if (type > HID_FEATURE_REPORT) {
		hid_err(hid, "invalid HID report type %u\n", type);
		return NULL;
	}

	if (id >= HID_MAX_IDS) {
		hid_err(hid, "invalid HID report id %u\n", id);
		return NULL;
	}

	/*
	 * Explicitly not using hid_get_report() here since it depends on
	 * ->numbered being checked, which may not always be the case when
	 * drivers go to access report values.
	 */
	if (id == 0) {
		/*
		 * Validating on id 0 means we should examine the first
		 * report in the list.
		 */
		report = list_first_entry_or_null(
				&hid->report_enum[type].report_list,
				struct hid_report, list);
	} else {
		report = hid->report_enum[type].report_id_hash[id];
	}
	if (!report) {
		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
		return NULL;
	}
	if (report->maxfield <= field_index) {
		hid_err(hid, "not enough fields in %s %u\n",
			hid_report_names[type], id);
		return NULL;
	}
	if (report->field[field_index]->report_count < report_counts) {
		hid_err(hid, "not enough values in %s %u field %u\n",
			hid_report_names[type], id, field_index);
		return NULL;
	}
	return report;
}
EXPORT_SYMBOL_GPL(hid_validate_values);
1082
/*
 * Compute the Effective Resolution Multiplier from a Resolution
 * Multiplier feature field by mapping its current value from the
 * logical range onto the physical range.  Falls back to 1 for
 * degenerate ranges or out-of-bounds results.
 */
static int hid_calculate_multiplier(struct hid_device *hid,
				    struct hid_field *multiplier)
{
	int m;
	__s32 v = *multiplier->value;
	__s32 lmin = multiplier->logical_minimum;
	__s32 lmax = multiplier->logical_maximum;
	__s32 pmin = multiplier->physical_minimum;
	__s32 pmax = multiplier->physical_maximum;

	/*
	 * "Because OS implementations will generally divide the control's
	 * reported count by the Effective Resolution Multiplier, designers
	 * should take care not to establish a potential Effective
	 * Resolution Multiplier of zero."
	 * HID Usage Table, v1.12, Section 4.3.1, p31
	 */
	if (lmax - lmin == 0)
		return 1;
	/*
	 * Handling the unit exponent is left as an exercise to whoever
	 * finds a device where that exponent is not 0.
	 *
	 * NOTE(review): the integer division happens before the scaling,
	 * so (v - lmin)/(lmax - lmin) is 0 unless v == lmax — confirm this
	 * truncating behaviour is intended.
	 */
	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
	if (unlikely(multiplier->unit_exponent != 0)) {
		hid_warn(hid,
			 "unsupported Resolution Multiplier unit exponent %d\n",
			 multiplier->unit_exponent);
	}

	/* There are no devices with an effective multiplier > 255 */
	if (unlikely(m == 0 || m > 255 || m < -255)) {
		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
		m = 1;
	}

	return m;
}
1121
/*
 * Propagate one computed Effective Resolution Multiplier onto every usage
 * of @field that sits inside @multiplier_collection (or onto all usages
 * when the multiplier is not scoped to a Logical Collection).
 */
static void hid_apply_multiplier_to_field(struct hid_device *hid,
					  struct hid_field *field,
					  struct hid_collection *multiplier_collection,
					  int effective_multiplier)
{
	struct hid_collection *collection;
	struct hid_usage *usage;
	int i;

	/*
	 * If multiplier_collection is NULL, the multiplier applies
	 * to all fields in the report.
	 * Otherwise, it is the Logical Collection the multiplier applies to
	 * but our field may be in a subcollection of that collection.
	 */
	for (i = 0; i < field->maxusage; i++) {
		usage = &field->usage[i];

		/* Walk towards the root until we either reach the
		 * multiplier's collection or run out of parents. */
		collection = &hid->collection[usage->collection_index];
		while (collection->parent_idx != -1 &&
		       collection != multiplier_collection)
			collection = &hid->collection[collection->parent_idx];

		/*
		 * Stopping before the root means the walk actually found
		 * multiplier_collection, i.e. this usage is inside it.
		 */
		if (collection->parent_idx != -1 ||
		    multiplier_collection == NULL)
			usage->resolution_multiplier = effective_multiplier;

	}
}
1151
/*
 * Resolve the scope of one Resolution Multiplier feature field and apply
 * its computed value to all input-report fields in that scope.
 */
static void hid_apply_multiplier(struct hid_device *hid,
				 struct hid_field *multiplier)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_field *field;
	struct hid_collection *multiplier_collection;
	int effective_multiplier;
	int i;

	/*
	 * "The Resolution Multiplier control must be contained in the same
	 * Logical Collection as the control(s) to which it is to be applied.
	 * If no Resolution Multiplier is defined, then the Resolution
	 * Multiplier defaults to 1. If more than one control exists in a
	 * Logical Collection, the Resolution Multiplier is associated with
	 * all controls in the collection. If no Logical Collection is
	 * defined, the Resolution Multiplier is associated with all
	 * controls in the report."
	 * HID Usage Table, v1.12, Section 4.3.1, p30
	 *
	 * Thus, search from the current collection upwards until we find a
	 * logical collection. Then search all fields for that same parent
	 * collection. Those are the fields the multiplier applies to.
	 *
	 * If we have more than one multiplier, it will overwrite the
	 * applicable fields later.
	 */
	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
	while (multiplier_collection->parent_idx != -1 &&
	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
	/* NULL scope means "applies to the whole report" (see above). */
	if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
		multiplier_collection = NULL;

	effective_multiplier = hid_calculate_multiplier(hid, multiplier);

	/* Multipliers only ever affect Input reports. */
	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			field = rep->field[i];
			hid_apply_multiplier_to_field(hid, field,
						      multiplier_collection,
						      effective_multiplier);
		}
	}
}
1199
/*
 * hid_setup_resolution_multiplier - set up all resolution multipliers
 *
 * @hid: hid device
 *
 * Search for all Resolution Multiplier Feature Reports and apply their
 * value to all matching Input items. This only updates the internal struct
 * fields.
 *
 * The Resolution Multiplier is applied by the hardware. If the multiplier
 * is anything other than 1, the hardware will send pre-multiplied events
 * so that the same physical interaction generates an accumulated
 *	accumulated_value = value * multiplier
 * This may be achieved by sending
 * - "value * multiplier" for each event, or
 * - "value" but "multiplier" times as frequently, or
 * - a combination of the above
 * The only guarantee is that the same physical interaction always generates
 * an accumulated 'value * multiplier'.
 *
 * This function must be called before any event processing and after
 * any SetRequest to the Resolution Multiplier.
 */
void hid_setup_resolution_multiplier(struct hid_device *hid)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_usage *usage;
	int i, j;

	/* Multipliers are declared as Feature report usages. */
	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			/* Ignore if report count is out of bounds. */
			if (rep->field[i]->report_count < 1)
				continue;

			for (j = 0; j < rep->field[i]->maxusage; j++) {
				usage = &rep->field[i]->usage[j];
				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
					hid_apply_multiplier(hid,
							     rep->field[i]);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1247
/*
 * Parse device->rdesc item by item, dispatching each short item to the
 * main/global/local handler, and populate the device's collections and
 * reports. Returns 0 on success or a negative errno.
 */
static int hid_parse_collections(struct hid_device *device)
{
	struct hid_item item;
	const u8 *start = device->rdesc;
	const u8 *end = start + device->rsize;
	const u8 *next;
	int ret;
	/* Indexed by item.type (2 bits): main, global, local, reserved. */
	static typeof(hid_parser_main) (* const dispatch_type[]) = {
		hid_parser_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	/* Freed automatically on every return path via __free(kvfree). */
	struct hid_parser *parser __free(kvfree) = vzalloc(sizeof(*parser));
	if (!parser)
		return -ENOMEM;

	parser->device = device;

	device->collection = kzalloc_objs(*device->collection,
					  HID_DEFAULT_NUM_COLLECTIONS);
	if (!device->collection)
		return -ENOMEM;

	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
	/* parent_idx == -1 marks a root collection. */
	for (unsigned int i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
		device->collection[i].parent_idx = -1;

	ret = -EINVAL;
	if (start == end) {
		hid_err(device, "rejecting 0-sized report descriptor\n");
		goto out;
	}

	while ((next = fetch_item(start, end, &item)) != NULL) {
		start = next;

		if (item.format != HID_ITEM_FORMAT_SHORT) {
			hid_err(device, "unexpected long global item\n");
			goto out;
		}

		if (dispatch_type[item.type](parser, &item)) {
			hid_err(device, "item %u %u %u %u parsing failed\n",
				item.format,
				(unsigned int)item.size,
				(unsigned int)item.type,
				(unsigned int)item.tag);
			goto out;
		}
	}

	/* fetch_item() returning NULL before consuming everything means a
	 * truncated or malformed trailing item. */
	if (start != end) {
		hid_err(device, "item fetching failed at offset %u/%u\n",
			device->rsize - (unsigned int)(end - start),
			device->rsize);
		goto out;
	}

	if (parser->collection_stack_ptr) {
		hid_err(device, "unbalanced collection at end of report description\n");
		goto out;
	}

	if (parser->local.delimiter_depth) {
		hid_err(device, "unbalanced delimiter at end of report description\n");
		goto out;
	}

	/*
	 * fetch initial values in case the device's
	 * default multiplier isn't the recommended 1
	 */
	hid_setup_resolution_multiplier(device);

	device->status |= HID_STAT_PARSED;
	ret = 0;

out:
	kfree(parser->collection_stack);
	return ret;
}
1331
1332 /**
1333 * hid_open_report - open a driver-specific device report
1334 *
1335 * @device: hid device
1336 *
1337 * Parse a report description into a hid_device structure. Reports are
1338 * enumerated, fields are attached to these reports.
1339 * 0 returned on success, otherwise nonzero error value.
1340 *
1341 * This function (or the equivalent hid_parse() macro) should only be
1342 * called from probe() in drivers, before starting the device.
1343 */
int hid_open_report(struct hid_device *device)
{
	unsigned int size;
	const u8 *start;
	int error;

	/* Parsing twice would re-register reports already in place. */
	if (WARN_ON(device->status & HID_STAT_PARSED))
		return -EBUSY;

	/* Parse the (possibly HID-BPF rewritten) descriptor copy. */
	start = device->bpf_rdesc;
	if (WARN_ON(!start))
		return -ENODEV;
	size = device->bpf_rsize;

	if (device->driver->report_fixup) {
		/*
		 * device->driver->report_fixup() needs to work
		 * on a copy of our report descriptor so it can
		 * change it.
		 */
		u8 *buf __free(kfree) = kmemdup(start, size, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* report_fixup() may return buf itself or other memory,
		 * and may shrink/grow the descriptor via &size. */
		start = device->driver->report_fixup(device, buf, &size);

		/*
		 * The second kmemdup is required in case report_fixup() returns
		 * a static read-only memory, but we have no idea if that memory
		 * needs to be cleaned up or not at the end.
		 */
		start = kmemdup(start, size, GFP_KERNEL);
		if (!start)
			return -ENOMEM;
	}

	/* The device owns rdesc from here on (released together with the
	 * parsed reports in hid_close_report()). */
	device->rdesc = start;
	device->rsize = size;

	error = hid_parse_collections(device);
	if (error) {
		hid_close_report(device);
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hid_open_report);
1393
1394 /*
1395 * Extract/implement a data field from/to a little endian report (bit array).
1396 *
1397 * Code sort-of follows HID spec:
1398 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1399 *
1400 * While the USB HID spec allows unlimited length bit fields in "report
1401 * descriptors", most devices never use more than 16 bits.
1402 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1403 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1404 */
1405
__extract(u8 * report,unsigned offset,int n)1406 static u32 __extract(u8 *report, unsigned offset, int n)
1407 {
1408 unsigned int idx = offset / 8;
1409 unsigned int bit_nr = 0;
1410 unsigned int bit_shift = offset % 8;
1411 int bits_to_copy = 8 - bit_shift;
1412 u32 value = 0;
1413 u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1414
1415 while (n > 0) {
1416 value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1417 n -= bits_to_copy;
1418 bit_nr += bits_to_copy;
1419 bits_to_copy = 8;
1420 bit_shift = 0;
1421 idx++;
1422 }
1423
1424 return value & mask;
1425 }
1426
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n)
{
	/* Fields wider than 32 bits cannot fit in the u32 return value;
	 * warn once per device and clamp instead of corrupting data. */
	if (n > 32) {
		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
			      __func__, n, current->comm);
		n = 32;
	}

	return __extract(report, offset, n);
}
EXPORT_SYMBOL_GPL(hid_field_extract);
1439
1440 /*
1441 * "implement" : set bits in a little endian bit stream.
1442 * Same concepts as "extract" (see comments above).
1443 * The data mangled in the bit stream remains in little endian
1444 * order the whole time. It make more sense to talk about
1445 * endianness of register values by considering a register
1446 * a "cached" copy of the little endian bit stream.
1447 */
1448
/*
 * Write the low n bits of 'value' into the little-endian bit stream at
 * bit 'offset', leaving all surrounding bits untouched.
 */
static void __implement(u8 *report, unsigned offset, int n, u32 value)
{
	unsigned int byte = offset / 8;
	unsigned int shift = offset % 8;
	int remaining = n;

	/* Bytes the field covers up to their top bit. */
	while (remaining >= 8 - (int)shift) {
		report[byte] &= ~(0xff << shift);
		report[byte] |= value << shift;
		value >>= 8 - shift;
		remaining -= 8 - shift;
		shift = 0;
		byte++;
	}

	/* Trailing bits that only partially occupy the last byte. */
	if (remaining) {
		u8 tail_mask = (1U << remaining) - 1;

		report[byte] &= ~(tail_mask << shift);
		report[byte] |= value << shift;
	}
}
1472
/*
 * Range-checked front end for __implement(): clamps the bit count to 32
 * and masks over-large values so they cannot clobber neighbouring fields.
 */
static void implement(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n, u32 value)
{
	if (unlikely(n > 32)) {
		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
			 __func__, n, current->comm);
		n = 32;
	} else if (n < 32) {
		u32 m = (1U << n) - 1;

		/* Truncate values that do not fit in n bits. */
		if (unlikely(value > m)) {
			hid_warn(hid,
				 "%s() called with too large value %d (n: %d)! (%s)\n",
				 __func__, value, n, current->comm);
			value &= m;
		}
	}

	__implement(report, offset, n, value);
}
1493
1494 /*
1495 * Search an array for a value.
1496 */
1497
/*
 * Linear scan for 'value' in the first n entries of 'array'.
 * Returns 0 when found, -1 otherwise (note the inverted convention).
 */
static int search(__s32 *array, __s32 value, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++)
		if (array[i] == value)
			return 0;

	return -1;
}
1506
1507 /**
1508 * hid_match_report - check if driver's raw_event should be called
1509 *
1510 * @hid: hid device
1511 * @report: hid report to match against
1512 *
1513 * compare hid->driver->report_table->report_type to report->type
1514 */
hid_match_report(struct hid_device * hid,struct hid_report * report)1515 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1516 {
1517 const struct hid_report_id *id = hid->driver->report_table;
1518
1519 if (!id) /* NULL means all */
1520 return 1;
1521
1522 for (; id->report_type != HID_TERMINATOR; id++)
1523 if (id->report_type == HID_ANY_ID ||
1524 id->report_type == report->type)
1525 return 1;
1526 return 0;
1527 }
1528
1529 /**
1530 * hid_match_usage - check if driver's event should be called
1531 *
1532 * @hid: hid device
1533 * @usage: usage to match against
1534 *
1535 * compare hid->driver->usage_table->usage_{type,code} to
1536 * usage->usage_{type,code}
1537 */
static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
{
	const struct hid_usage_id *id = hid->driver->usage_table;

	if (!id) /* NULL means all */
		return 1;

	/* usage_type == HID_ANY_ID - 1 is the table's terminator entry. */
	for (; id->usage_type != HID_ANY_ID - 1; id++)
		if ((id->usage_hid == HID_ANY_ID ||
		     id->usage_hid == usage->hid) &&
		    (id->usage_type == HID_ANY_ID ||
		     id->usage_type == usage->type) &&
		    (id->usage_code == HID_ANY_ID ||
		     id->usage_code == usage->code))
			return 1;
	return 0;
}
1555
/*
 * Deliver one (usage, value) pair: give the bound driver's ->event()
 * first refusal, then fan the event out to the input and hiddev layers.
 */
static void hid_process_event(struct hid_device *hid, struct hid_field *field,
			      struct hid_usage *usage, __s32 value, int interrupt)
{
	struct hid_driver *hdrv = hid->driver;
	int ret;

	/* Only pay for debugfs dumping when someone is actually listening. */
	if (!list_empty(&hid->debug_list))
		hid_dump_input(hid, usage, value);

	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
		ret = hdrv->event(hid, field, usage, value);
		if (ret != 0) {
			/* Non-zero means the driver consumed the event;
			 * negative additionally reports a failure. */
			if (ret < 0)
				hid_err(hid, "%s's event failed with %d\n",
					hdrv->name, ret);
			return;
		}
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_hid_event(hid, field, usage, value);
	/* hiddev only consumes events originating from an interrupt URB. */
	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
		hid->hiddev_hid_event(hid, field, usage, value);
}
1580
1581 /*
1582 * Checks if the given value is valid within this field
1583 */
/*
 * Checks if the given value is valid within this field.
 *
 * (value - logical_minimum) is later used to index field->usage[], an
 * array of field->maxusage entries, so the value must both lie in the
 * logical range and produce an in-bounds index.
 */
static inline int hid_array_value_is_valid(struct hid_field *field,
					   __s32 value)
{
	if (value < field->logical_minimum)
		return 0;
	if (value > field->logical_maximum)
		return 0;

	return value - field->logical_minimum < field->maxusage;
}
1598
1599 /*
1600 * Fetch the field from the data. The field content is stored for next
1601 * report processing (we do differential reporting to the layer).
1602 */
/*
 * Fetch the field from the data. The field content is stored for next
 * report processing (we do differential reporting to the layer).
 */
static void hid_input_fetch_field(struct hid_device *hid,
				  struct hid_field *field,
				  __u8 *data)
{
	unsigned n;
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	__s32 min = field->logical_minimum;
	__s32 *value;

	/* Decode into new_value; field->value still holds the old state. */
	value = field->new_value;
	memset(value, 0, count * sizeof(__s32));
	field->ignored = false;

	for (n = 0; n < count; n++) {

		/* Sign-extend only when the logical range admits
		 * negative values. */
		value[n] = min < 0 ?
			snto32(hid_field_extract(hid, data, offset + n * size,
			       size), size) :
			hid_field_extract(hid, data, offset + n * size, size);

		/* Ignore report if ErrorRollOver */
		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
		    hid_array_value_is_valid(field, value[n]) &&
		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
			field->ignored = true;
			return;
		}
	}
}
1634
1635 /*
1636 * Process a received variable field.
1637 */
1638
hid_input_var_field(struct hid_device * hid,struct hid_field * field,int interrupt)1639 static void hid_input_var_field(struct hid_device *hid,
1640 struct hid_field *field,
1641 int interrupt)
1642 {
1643 unsigned int count = field->report_count;
1644 __s32 *value = field->new_value;
1645 unsigned int n;
1646
1647 for (n = 0; n < count; n++)
1648 hid_process_event(hid,
1649 field,
1650 &field->usage[n],
1651 value[n],
1652 interrupt);
1653
1654 memcpy(field->value, value, count * sizeof(__s32));
1655 }
1656
1657 /*
1658 * Process a received array field. The field content is stored for
1659 * next report processing (we do differential reporting to the layer).
1660 */
1661
/*
 * Process a received array field. The field content is stored for
 * next report processing (we do differential reporting to the layer).
 */
static void hid_input_array_field(struct hid_device *hid,
				  struct hid_field *field,
				  int interrupt)
{
	unsigned int n;
	unsigned int count = field->report_count;
	__s32 min = field->logical_minimum;
	__s32 *value;

	value = field->new_value;

	/* ErrorRollOver */
	if (field->ignored)
		return;

	for (n = 0; n < count; n++) {
		/* Old entry absent from the new set: report release (0). */
		if (hid_array_value_is_valid(field, field->value[n]) &&
		    search(value, field->value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[field->value[n] - min],
					  0,
					  interrupt);

		/* New entry absent from the old set: report press (1). */
		if (hid_array_value_is_valid(field, value[n]) &&
		    search(field->value, value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[value[n] - min],
					  1,
					  interrupt);
	}

	/* Latch the new state for the next differential pass. */
	memcpy(field->value, value, count * sizeof(__s32));
}
1697
1698 /*
1699 * Analyse a received report, and fetch the data from it. The field
1700 * content is stored for next report processing (we do differential
1701 * reporting to the layer).
1702 */
/*
 * Analyse a received report, and fetch the data from it. The field
 * content is stored for next report processing (we do differential
 * reporting to the layer).
 */
static void hid_process_report(struct hid_device *hid,
			       struct hid_report *report,
			       __u8 *data,
			       int interrupt)
{
	unsigned int a;
	struct hid_field_entry *entry;
	struct hid_field *field;

	/* first retrieve all incoming values in data */
	for (a = 0; a < report->maxfield; a++)
		hid_input_fetch_field(hid, report->field[a], data);

	if (!list_empty(&report->field_entry_list)) {
		/* INPUT_REPORT, we have a priority list of fields */
		list_for_each_entry(entry,
				    &report->field_entry_list,
				    list) {
			field = entry->field;

			/* Var usages were individualized per (field, index)
			 * by hid_report_process_ordering(). */
			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_process_event(hid,
						  field,
						  &field->usage[entry->index],
						  field->new_value[entry->index],
						  interrupt);
			else
				hid_input_array_field(hid, field, interrupt);
		}

		/* we need to do the memcpy at the end for var items */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				memcpy(field->value, field->new_value,
				       field->report_count * sizeof(__s32));
		}
	} else {
		/* FEATURE_REPORT, regular processing */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_input_var_field(hid, field, interrupt);
			else
				hid_input_array_field(hid, field, interrupt);
		}
	}
}
1753
1754 /*
1755 * Insert a given usage_index in a field in the list
1756 * of processed usages in the report.
1757 *
1758 * The elements of lower priority score are processed
1759 * first.
1760 */
__hid_insert_field_entry(struct hid_device * hid,struct hid_report * report,struct hid_field_entry * entry,struct hid_field * field,unsigned int usage_index)1761 static void __hid_insert_field_entry(struct hid_device *hid,
1762 struct hid_report *report,
1763 struct hid_field_entry *entry,
1764 struct hid_field *field,
1765 unsigned int usage_index)
1766 {
1767 struct hid_field_entry *next;
1768
1769 entry->field = field;
1770 entry->index = usage_index;
1771 entry->priority = field->usages_priorities[usage_index];
1772
1773 /* insert the element at the correct position */
1774 list_for_each_entry(next,
1775 &report->field_entry_list,
1776 list) {
1777 /*
1778 * the priority of our element is strictly higher
1779 * than the next one, insert it before
1780 */
1781 if (entry->priority > next->priority) {
1782 list_add_tail(&entry->list, &next->list);
1783 return;
1784 }
1785 }
1786
1787 /* lowest priority score: insert at the end */
1788 list_add_tail(&entry->list, &report->field_entry_list);
1789 }
1790
/*
 * Build report->field_entries and the priority-sorted field_entry_list
 * consumed by hid_process_report() for input reports.
 */
static void hid_report_process_ordering(struct hid_device *hid,
					struct hid_report *report)
{
	struct hid_field *field;
	struct hid_field_entry *entries;
	unsigned int a, u, usages;
	unsigned int count = 0;

	/* count the number of individual fields in the report */
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE)
			count += field->report_count;
		else
			count++;
	}

	/* allocate the memory to process the fields */
	entries = kzalloc_objs(*entries, count);
	if (!entries)
		return; /* list stays empty: unordered path is used instead */

	report->field_entries = entries;

	/*
	 * walk through all fields in the report and
	 * store them by priority order in report->field_entry_list
	 *
	 * - Var elements are individualized (field + usage_index)
	 * - Arrays are taken as one, we can not chose an order for them
	 */
	usages = 0;
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
			for (u = 0; u < field->report_count; u++) {
				__hid_insert_field_entry(hid, report,
							 &entries[usages],
							 field, u);
				usages++;
			}
		} else {
			__hid_insert_field_entry(hid, report, &entries[usages],
						 field, 0);
			usages++;
		}
	}
}
1841
hid_process_ordering(struct hid_device * hid)1842 static void hid_process_ordering(struct hid_device *hid)
1843 {
1844 struct hid_report *report;
1845 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1846
1847 list_for_each_entry(report, &report_enum->report_list, list)
1848 hid_report_process_ordering(hid, report);
1849 }
1850
1851 /*
1852 * Output the field into the report.
1853 */
1854
/*
 * Output the field into the report: pack every value of the field into
 * its bit positions, converting signed values to their n-bit form first.
 */
static void hid_output_field(const struct hid_device *hid,
			     struct hid_field *field, __u8 *data)
{
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	unsigned i;

	for (i = 0; i < field->report_count; i++) {
		u32 raw = field->logical_minimum < 0 ?
			s32ton(field->value[i], size) : /* signed values */
			field->value[i];		/* unsigned values */

		implement(hid, data, offset + i * size, size, raw);
	}
}
1872
1873 /*
1874 * Compute the size of a report.
1875 */
hid_compute_report_size(struct hid_report * report)1876 static size_t hid_compute_report_size(struct hid_report *report)
1877 {
1878 if (report->size)
1879 return ((report->size - 1) >> 3) + 1;
1880
1881 return 0;
1882 }
1883
1884 /*
1885 * Create a report. 'data' has to be allocated using
1886 * hid_alloc_report_buf() so that it has proper size.
1887 */
1888
/*
 * Create a report. 'data' has to be allocated using
 * hid_alloc_report_buf() so that it has proper size.
 */
void hid_output_report(struct hid_report *report, __u8 *data)
{
	unsigned i;

	/* Numbered reports carry their ID in the leading byte. */
	if (report->id > 0)
		*data++ = report->id;

	/* Start from a clean payload, then pack each field in turn. */
	memset(data, 0, hid_compute_report_size(report));
	for (i = 0; i < report->maxfield; i++)
		hid_output_field(report->device, report->field[i], data);
}
EXPORT_SYMBOL_GPL(hid_output_report);
1901
1902 /*
1903 * Allocator for buffer that is going to be passed to hid_output_report()
1904 */
/*
 * Allocate a zeroed buffer large enough to hold the report plus the
 * slack that implement() and .raw_request() need. Caller frees.
 */
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
{
	/*
	 * 7 extra bytes are necessary to achieve proper functionality
	 * of implement() working on 8 byte chunks
	 * 1 extra byte for the report ID if it is null (not used) so
	 * we can reserve that extra byte in the first position of the buffer
	 * when sending it to .raw_request()
	 */

	u32 len = hid_report_len(report) + 7 + (report->id == 0);

	return kzalloc(len, flags);
}
EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
1920
1921 /*
1922 * Set a field value. The report this field belongs to has to be
1923 * created and transferred to the device, to set this value in the
1924 * device.
1925 */
1926
/*
 * Set a field value. The report this field belongs to has to be
 * created and transferred to the device, to set this value in the
 * device.
 *
 * Returns 0 on success, -1 on a NULL field, an out-of-range offset,
 * or a value that does not fit the field's signed n-bit encoding.
 */
int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
	unsigned size;

	if (!field)
		return -1;

	size = field->report_size;

	/*
	 * Validate the offset before touching field->usage[offset]:
	 * hid_dump_input() dereferences that entry, so dumping first
	 * would read past the usage array for a bad offset.
	 */
	if (offset >= field->report_count) {
		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
			offset, field->report_count);
		return -1;
	}

	hid_dump_input(field->report->device, field->usage + offset, value);

	if (field->logical_minimum < 0) {
		/* Reject values that do not survive an n-bit round trip. */
		if (value != snto32(s32ton(value, size), size)) {
			hid_err(field->report->device, "value %d is out of range\n", value);
			return -1;
		}
	}
	field->value[offset] = value;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_set_field);
1953
hid_find_field(struct hid_device * hdev,unsigned int report_type,unsigned int application,unsigned int usage)1954 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1955 unsigned int application, unsigned int usage)
1956 {
1957 struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1958 struct hid_report *report;
1959 int i, j;
1960
1961 list_for_each_entry(report, report_list, list) {
1962 if (report->application != application)
1963 continue;
1964
1965 for (i = 0; i < report->maxfield; i++) {
1966 struct hid_field *field = report->field[i];
1967
1968 for (j = 0; j < field->maxusage; j++) {
1969 if (field->usage[j].hid == usage)
1970 return field;
1971 }
1972 }
1973 }
1974
1975 return NULL;
1976 }
1977 EXPORT_SYMBOL_GPL(hid_find_field);
1978
hid_get_report(struct hid_report_enum * report_enum,const u8 * data)1979 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1980 const u8 *data)
1981 {
1982 struct hid_report *report;
1983 unsigned int n = 0; /* Normally report number is 0 */
1984
1985 /* Device uses numbered reports, data[0] is report number */
1986 if (report_enum->numbered)
1987 n = *data;
1988
1989 report = report_enum->report_id_hash[n];
1990 if (report == NULL)
1991 dbg_hid("undefined report_id %u received\n", n);
1992
1993 return report;
1994 }
1995
1996 /*
1997 * Implement a generic .request() callback, using .raw_request()
1998 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1999 */
int __hid_request(struct hid_device *hid, struct hid_report *report,
		  enum hid_class_request reqtype)
{
	u8 *data_buf;
	int ret;
	u32 len;

	/* Freed automatically on every exit path via __free(kfree). */
	u8 *buf __free(kfree) = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	data_buf = buf;
	len = hid_report_len(report);

	if (report->id == 0) {
		/* reserve the first byte for the report ID */
		data_buf++;
		len++;
	}

	if (reqtype == HID_REQ_SET_REPORT)
		hid_output_report(report, data_buf);

	ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
	if (ret < 0) {
		dbg_hid("unable to complete request: %d\n", ret);
		return ret;
	}

	/* Feed the device's answer back through normal report parsing. */
	if (reqtype == HID_REQ_GET_REPORT)
		hid_input_report(hid, report->type, buf, ret, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(__hid_request);
2035
/*
 * Validate and dispatch one raw report: resolve the report, sanity-check
 * the buffer and payload sizes against the computed report size, zero-pad
 * short payloads, then hand the data to hiddev/hidraw/driver/input.
 *
 * Format specifiers fixed: csize/rsize are u32 (%u, not %d) and bsize is
 * size_t (%zu, not %ld — wrong width on 32-bit and LLP64 targets).
 */
int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
			 size_t bufsize, u32 size, int interrupt)
{
	struct hid_report_enum *report_enum = hid->report_enum + type;
	struct hid_report *report;
	struct hid_driver *hdrv;
	int max_buffer_size = HID_MAX_BUFFER_SIZE;
	u32 rsize, csize = size;
	size_t bsize = bufsize;
	u8 *cdata = data;
	int ret = 0;

	report = hid_get_report(report_enum, data);
	if (!report)
		return 0;

	/* The claimed payload can never exceed the buffer it sits in. */
	if (unlikely(bsize < csize)) {
		hid_warn_ratelimited(hid, "Event data for report %d is incorrect (%u vs %zu)\n",
				     report->id, csize, bsize);
		return -EINVAL;
	}

	/* data[0] holds the report ID; skip it for field parsing. */
	if (report_enum->numbered) {
		cdata++;
		csize--;
		bsize--;
	}

	rsize = hid_compute_report_size(report);

	/* Clamp the expected size to what the transport can carry. */
	if (hid->ll_driver->max_buffer_size)
		max_buffer_size = hid->ll_driver->max_buffer_size;

	if (report_enum->numbered && rsize >= max_buffer_size)
		rsize = max_buffer_size - 1;
	else if (rsize > max_buffer_size)
		rsize = max_buffer_size;

	if (bsize < rsize) {
		hid_warn_ratelimited(hid, "Event data for report %d was too short (%u vs %zu)\n",
				     report->id, rsize, bsize);
		return -EINVAL;
	}

	/* Zero-pad short payloads so field extraction never reads stale
	 * bytes (the buffer is known big enough from the check above). */
	if (csize < rsize) {
		dbg_hid("report %d is too short, (%u < %u)\n", report->id,
			csize, rsize);
		memset(cdata + csize, 0, rsize - csize);
	}

	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
		hid->hiddev_report_event(hid, report);
	if (hid->claimed & HID_CLAIMED_HIDRAW) {
		/* hidraw consumers get the unmodified raw bytes. */
		ret = hidraw_report_event(hid, data, size);
		if (ret)
			return ret;
	}

	/* Skip field parsing when hidraw is the only consumer. */
	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
		hid_process_report(hid, report, cdata, interrupt);
		hdrv = hid->driver;
		if (hdrv && hdrv->report)
			hdrv->report(hid, report);
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_report_event(hid, report);

	return ret;
}
EXPORT_SYMBOL_GPL(hid_report_raw_event);
2107
2108
/*
 * Core input entry point: validates the driver_input_lock state, runs the
 * HID-BPF device-event dispatcher, logs to debugfs, offers the bound
 * driver's ->raw_event() a first look at the bytes, then hands the report
 * to hid_report_raw_event() for parsing and delivery.
 *
 * @source and @from_bpf are forwarded to the HID-BPF dispatcher to identify
 * where the event originated.
 * @lock_already_taken: true when the caller already holds
 * hid->driver_input_lock.
 *
 * Returns 0 on success; -ENODEV without a device or bound driver; -EBUSY
 * when the lock cannot be taken; -EINVAL on a lock-state mismatch; -1 for
 * empty or unrecognized reports.
 */
static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
			      u8 *data, size_t bufsize, u32 size, int interrupt, u64 source,
			      bool from_bpf, bool lock_already_taken)
{
	struct hid_report_enum *report_enum;
	struct hid_driver *hdrv;
	struct hid_report *report;
	int ret = 0;

	if (!hid)
		return -ENODEV;

	/*
	 * Cross-check the lock state against the caller's claim: if the
	 * caller says it holds the lock, trylock must fail; if it does not,
	 * trylock must succeed (and then we own the lock until unlock:).
	 */
	ret = down_trylock(&hid->driver_input_lock);
	if (lock_already_taken && !ret) {
		up(&hid->driver_input_lock);
		return -EINVAL;
	} else if (!lock_already_taken && ret) {
		return -EBUSY;
	}

	if (!hid->driver) {
		ret = -ENODEV;
		goto unlock;
	}
	report_enum = hid->report_enum + type;
	hdrv = hid->driver;

	/* HID-BPF programs may modify the event or swap in a new buffer */
	data = dispatch_hid_bpf_device_event(hid, type, data, &bufsize, &size, interrupt,
					     source, from_bpf);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto unlock;
	}

	if (!size) {
		dbg_hid("empty report\n");
		ret = -1;
		goto unlock;
	}

	/* Avoid unnecessary overhead if debugfs is disabled */
	if (!list_empty(&hid->debug_list))
		hid_dump_report(hid, type, data, size);

	report = hid_get_report(report_enum, data);

	if (!report) {
		ret = -1;
		goto unlock;
	}

	/* the driver may preprocess, or fully consume, the raw bytes */
	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
		ret = hdrv->raw_event(hid, report, data, size);
		if (ret < 0)
			goto unlock;
	}

	ret = hid_report_raw_event(hid, type, data, bufsize, size, interrupt);

unlock:
	if (!lock_already_taken)
		up(&hid->driver_input_lock);
	return ret;
}
2173
2174 /**
2175 * hid_input_report - report data from lower layer (usb, bt...)
2176 *
2177 * @hid: hid device
2178 * @type: HID report type (HID_*_REPORT)
2179 * @data: report contents
2180 * @size: size of data parameter
2181 * @interrupt: distinguish between interrupt and control transfers
2182 *
2183 * This is data entry for lower layers.
2184 * Legacy, please use hid_safe_input_report() instead.
2185 */
int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
		     int interrupt)
{
	/*
	 * The allocated buffer size is unknown to legacy callers, so assume
	 * it equals the useful size; no BPF origin, lock not yet held.
	 */
	const u64 no_source = 0;

	return __hid_input_report(hid, type, data, size, size, interrupt,
				  no_source,
				  false, /* from_bpf */
				  false /* lock_already_taken */);
}
EXPORT_SYMBOL_GPL(hid_input_report);
2194
2195 /**
2196 * hid_safe_input_report - report data from lower layer (usb, bt...)
2197 *
2198 * @hid: hid device
2199 * @type: HID report type (HID_*_REPORT)
2200 * @data: report contents
2201 * @bufsize: allocated size of the data buffer
2202 * @size: useful size of data parameter
2203 * @interrupt: distinguish between interrupt and control transfers
2204 *
2205 * This is data entry for lower layers.
2206 * Please use this function instead of the non safe version because we provide
2207 * here the size of the buffer, allowing hid-core to make smarter decisions
2208 * regarding the incoming buffer.
2209 */
int hid_safe_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data,
			  size_t bufsize, u32 size, int interrupt)
{
	/*
	 * Unlike hid_input_report(), the caller supplies the real allocated
	 * buffer size, letting hid-core bound its accesses precisely.
	 */
	const u64 no_source = 0;

	return __hid_input_report(hid, type, data, bufsize, size, interrupt,
				  no_source,
				  false, /* from_bpf */
				  false /* lock_already_taken */);
}
EXPORT_SYMBOL_GPL(hid_safe_input_report);
2218
hid_match_one_id(const struct hid_device * hdev,const struct hid_device_id * id)2219 bool hid_match_one_id(const struct hid_device *hdev,
2220 const struct hid_device_id *id)
2221 {
2222 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2223 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2224 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2225 (id->product == HID_ANY_ID || id->product == hdev->product);
2226 }
2227
hid_match_id(const struct hid_device * hdev,const struct hid_device_id * id)2228 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2229 const struct hid_device_id *id)
2230 {
2231 for (; id->bus; id++)
2232 if (hid_match_one_id(hdev, id))
2233 return id;
2234
2235 return NULL;
2236 }
2237 EXPORT_SYMBOL_GPL(hid_match_id);
2238
/* Devices that must always get a hiddev node (UPS units talked to via
 * userspace tools), regardless of the requested connect mask. */
static const struct hid_device_id hid_hiddev_list[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
	{ }
};

/* True when @hdev is on the forced-hiddev list above. */
static bool hid_hiddev(struct hid_device *hdev)
{
	return !!hid_match_id(hdev, hid_hiddev_list);
}
2249
2250
2251 static ssize_t
report_descriptor_read(struct file * filp,struct kobject * kobj,const struct bin_attribute * attr,char * buf,loff_t off,size_t count)2252 report_descriptor_read(struct file *filp, struct kobject *kobj,
2253 const struct bin_attribute *attr,
2254 char *buf, loff_t off, size_t count)
2255 {
2256 struct device *dev = kobj_to_dev(kobj);
2257 struct hid_device *hdev = to_hid_device(dev);
2258
2259 if (off >= hdev->rsize)
2260 return 0;
2261
2262 if (off + count > hdev->rsize)
2263 count = hdev->rsize - off;
2264
2265 memcpy(buf, hdev->rdesc + off, count);
2266
2267 return count;
2268 }
2269
2270 static ssize_t
country_show(struct device * dev,struct device_attribute * attr,char * buf)2271 country_show(struct device *dev, struct device_attribute *attr,
2272 char *buf)
2273 {
2274 struct hid_device *hdev = to_hid_device(dev);
2275
2276 return sprintf(buf, "%02x\n", hdev->country & 0xff);
2277 }
2278
/* Read-only binary sysfs attribute exposing the raw report descriptor. */
static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);

/* Read-only sysfs attribute backed by country_show() above. */
static const DEVICE_ATTR_RO(country);
2282
/**
 * hid_connect - attach the configured listeners to a HID device
 * @hdev: hid device
 * @connect_mask: HID_CONNECT_* bits selecting hidinput/hiddev/hidraw/FF
 *
 * Adjusts @connect_mask for quirks and transport constraints, connects the
 * requested interfaces (recording each success in hdev->claimed), optionally
 * initializes force feedback, creates the sysfs country attribute and logs
 * a one-line summary of the device.
 *
 * Returns 0 on success or a negative error code (-ENODEV when nothing
 * claimed the device).
 */
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
	/* human-readable labels indexed by the Generic Desktop application
	 * usage (0x00..0x08) */
	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
				"Joystick", "Gamepad", "Keyboard", "Keypad",
				"Multi-Axis Controller"
	};
	const char *type, *bus;
	char buf[64] = "";
	unsigned int i;
	int len;
	int ret;

	ret = hid_bpf_connect_device(hdev);
	if (ret)
		return ret;

	/* quirks and the transport may force or forbid specific interfaces;
	 * hiddev is only meaningful on USB */
	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
	if (hdev->bus != BUS_USB)
		connect_mask &= ~HID_CONNECT_HIDDEV;
	if (hid_hiddev(hdev))
		connect_mask |= HID_CONNECT_HIDDEV_FORCE;

	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
		hdev->claimed |= HID_CLAIMED_INPUT;

	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
			!hdev->hiddev_connect(hdev,
				connect_mask & HID_CONNECT_HIDDEV_FORCE))
		hdev->claimed |= HID_CLAIMED_HIDDEV;
	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
		hdev->claimed |= HID_CLAIMED_HIDRAW;

	if (connect_mask & HID_CONNECT_DRIVER)
		hdev->claimed |= HID_CLAIMED_DRIVER;

	/* Drivers with the ->raw_event callback set are not required to connect
	 * to any other listener. */
	if (!hdev->claimed && !hdev->driver->raw_event) {
		hid_err(hdev, "device has no listeners, quitting\n");
		return -ENODEV;
	}

	hid_process_ordering(hdev);

	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
		hdev->ff_init(hdev);

	/* build a comma-separated list of the claimed interfaces; the
	 * possible pieces fit comfortably in the 64-byte buffer */
	len = 0;
	if (hdev->claimed & HID_CLAIMED_INPUT)
		len += sprintf(buf + len, "input");
	if (hdev->claimed & HID_CLAIMED_HIDDEV)
		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
				((struct hiddev *)hdev->hiddev)->minor);
	if (hdev->claimed & HID_CLAIMED_HIDRAW)
		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
				((struct hidraw *)hdev->hidraw)->minor);

	/* derive a device-type label from the first Generic Desktop
	 * application collection, if any */
	type = "Device";
	for (i = 0; i < hdev->maxcollection; i++) {
		struct hid_collection *col = &hdev->collection[i];
		if (col->type == HID_COLLECTION_APPLICATION &&
		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
			type = types[col->usage & 0xffff];
			break;
		}
	}

	switch (hdev->bus) {
	case BUS_USB:
		bus = "USB";
		break;
	case BUS_BLUETOOTH:
		bus = "BLUETOOTH";
		break;
	case BUS_I2C:
		bus = "I2C";
		break;
	case BUS_SDW:
		bus = "SOUNDWIRE";
		break;
	case BUS_VIRTUAL:
		bus = "VIRTUAL";
		break;
	case BUS_INTEL_ISHTP:
	case BUS_AMD_SFH:
		bus = "SENSOR HUB";
		break;
	default:
		bus = "<UNKNOWN>";
	}

	/* a missing country attribute is not fatal, only worth a warning */
	ret = device_create_file(&hdev->dev, &dev_attr_country);
	if (ret)
		hid_warn(hdev,
			 "can't create sysfs country code attribute err: %d\n", ret);

	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
		 type, hdev->name, hdev->phys);

	return 0;
}
EXPORT_SYMBOL_GPL(hid_connect);
2392
/*
 * Tear down everything hid_connect() set up: the sysfs country attribute,
 * then each claimed interface, and finally the HID-BPF association.
 * Clears hdev->claimed so a later reconnect starts from a clean state.
 */
void hid_disconnect(struct hid_device *hdev)
{
	device_remove_file(&hdev->dev, &dev_attr_country);
	if (hdev->claimed & HID_CLAIMED_INPUT)
		hidinput_disconnect(hdev);
	if (hdev->claimed & HID_CLAIMED_HIDDEV)
		hdev->hiddev_disconnect(hdev);
	if (hdev->claimed & HID_CLAIMED_HIDRAW)
		hidraw_disconnect(hdev);
	hdev->claimed = 0;

	hid_bpf_disconnect_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_disconnect);
2407
2408 /**
2409 * hid_hw_start - start underlying HW
2410 * @hdev: hid device
2411 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2412 *
2413 * Call this in probe function *after* hid_parse. This will setup HW
 * buffers and start the device (if not deferred to device open).
2415 * hid_hw_stop must be called if this was successful.
2416 */
hid_hw_start(struct hid_device * hdev,unsigned int connect_mask)2417 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2418 {
2419 int error;
2420
2421 error = hdev->ll_driver->start(hdev);
2422 if (error)
2423 return error;
2424
2425 if (connect_mask) {
2426 error = hid_connect(hdev, connect_mask);
2427 if (error) {
2428 hdev->ll_driver->stop(hdev);
2429 return error;
2430 }
2431 }
2432
2433 return 0;
2434 }
2435 EXPORT_SYMBOL_GPL(hid_hw_start);
2436
2437 /**
2438 * hid_hw_stop - stop underlying HW
2439 * @hdev: hid device
2440 *
2441 * This is usually called from remove function or from probe when something
2442 * failed and hid_hw_start was called already.
2443 */
/* Disconnect all listeners first, then stop the underlying transport. */
void hid_hw_stop(struct hid_device *hdev)
{
	hid_disconnect(hdev);
	hdev->ll_driver->stop(hdev);
}
EXPORT_SYMBOL_GPL(hid_hw_stop);
2450
2451 /**
2452 * hid_hw_open - signal underlying HW to start delivering events
2453 * @hdev: hid device
2454 *
2455 * Tell underlying HW to start delivering events from the device.
2456 * This function should be called sometime after successful call
2457 * to hid_hw_start().
2458 */
int hid_hw_open(struct hid_device *hdev)
{
	int ret;

	/* killable so a process stuck here can still be terminated */
	ret = mutex_lock_killable(&hdev->ll_open_lock);
	if (ret)
		return ret;

	/* only the first opener actually opens the transport */
	if (!hdev->ll_open_count++) {
		ret = hdev->ll_driver->open(hdev);
		if (ret)
			hdev->ll_open_count--;

		/*
		 * NOTE(review): on_hid_hw_open is invoked even when ->open()
		 * failed (ret != 0) — confirm this is intentional.
		 */
		if (hdev->driver->on_hid_hw_open)
			hdev->driver->on_hid_hw_open(hdev);
	}

	mutex_unlock(&hdev->ll_open_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_hw_open);
2480
2481 /**
 * hid_hw_close - signal underlying HW to stop delivering events
2483 *
2484 * @hdev: hid device
2485 *
2486 * This function indicates that we are not interested in the events
2487 * from this device anymore. Delivery of events may or may not stop,
2488 * depending on the number of users still outstanding.
2489 */
void hid_hw_close(struct hid_device *hdev)
{
	mutex_lock(&hdev->ll_open_lock);
	/* only the last closer actually closes the transport */
	if (!--hdev->ll_open_count) {
		hdev->ll_driver->close(hdev);

		if (hdev->driver->on_hid_hw_close)
			hdev->driver->on_hid_hw_close(hdev);
	}
	mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);
2502
2503 /**
2504 * hid_hw_request - send report request to device
2505 *
2506 * @hdev: hid device
2507 * @report: report to send
2508 * @reqtype: hid request type
2509 */
hid_hw_request(struct hid_device * hdev,struct hid_report * report,enum hid_class_request reqtype)2510 void hid_hw_request(struct hid_device *hdev,
2511 struct hid_report *report, enum hid_class_request reqtype)
2512 {
2513 if (hdev->ll_driver->request)
2514 return hdev->ll_driver->request(hdev, report, reqtype);
2515
2516 __hid_request(hdev, report, reqtype);
2517 }
2518 EXPORT_SYMBOL_GPL(hid_hw_request);
2519
/* Internal worker for hid_hw_raw_request(): validates the buffer, gives
 * HID-BPF a chance to handle the request, then calls the transport. */
int __hid_hw_raw_request(struct hid_device *hdev,
			 unsigned char reportnum, __u8 *buf,
			 size_t len, enum hid_report_type rtype,
			 enum hid_class_request reqtype,
			 u64 source, bool from_bpf)
{
	unsigned int max_len = HID_MAX_BUFFER_SIZE;
	int ret;

	/* the transport may impose a tighter limit than the HID default */
	if (hdev->ll_driver->max_buffer_size)
		max_len = hdev->ll_driver->max_buffer_size;

	if (!buf || len < 1 || len > max_len)
		return -EINVAL;

	/* a non-zero return means a BPF program fully handled the request */
	ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
					    reqtype, source, from_bpf);
	if (ret)
		return ret;

	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
					    rtype, reqtype);
}
2543
2544 /**
2545 * hid_hw_raw_request - send report request to device
2546 *
2547 * @hdev: hid device
2548 * @reportnum: report ID
2549 * @buf: in/out data to transfer
2550 * @len: length of buf
2551 * @rtype: HID report type
2552 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2553 *
2554 * Return: count of data transferred, negative if error
2555 *
2556 * Same behavior as hid_hw_request, but with raw buffers instead.
2557 */
int hid_hw_raw_request(struct hid_device *hdev,
		       unsigned char reportnum, __u8 *buf,
		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
	/* plain kernel caller: no BPF source to attribute the request to */
	return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype,
				    reqtype, 0, false);
}
EXPORT_SYMBOL_GPL(hid_hw_raw_request);
2565
/* Internal worker for hid_hw_output_report(): validates the buffer, lets
 * HID-BPF intercept, then sends via the transport's output_report(). */
int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
			   bool from_bpf)
{
	unsigned int max_len = HID_MAX_BUFFER_SIZE;
	int ret;

	if (hdev->ll_driver->max_buffer_size)
		max_len = hdev->ll_driver->max_buffer_size;

	if (!buf || len < 1 || len > max_len)
		return -EINVAL;

	/* a non-zero return means a BPF program fully handled the report */
	ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
	if (ret)
		return ret;

	/* not every transport supports output reports */
	if (!hdev->ll_driver->output_report)
		return -ENOSYS;

	return hdev->ll_driver->output_report(hdev, buf, len);
}
2587
2588 /**
2589 * hid_hw_output_report - send output report to device
2590 *
2591 * @hdev: hid device
2592 * @buf: raw data to transfer
2593 * @len: length of buf
2594 *
2595 * Return: count of data transferred, negative if error
2596 */
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
	/* plain kernel caller: no BPF source to attribute the report to */
	const u64 no_source = 0;

	return __hid_hw_output_report(hdev, buf, len, no_source, false);
}
EXPORT_SYMBOL_GPL(hid_hw_output_report);
2602
2603 #ifdef CONFIG_PM
/* Forward a suspend request to the bound driver, if it handles one. */
int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
{
	struct hid_driver *hdrv = hdev->driver;

	if (!hdrv || !hdrv->suspend)
		return 0;

	return hdrv->suspend(hdev, state);
}
EXPORT_SYMBOL_GPL(hid_driver_suspend);
2612
hid_driver_reset_resume(struct hid_device * hdev)2613 int hid_driver_reset_resume(struct hid_device *hdev)
2614 {
2615 if (hdev->driver && hdev->driver->reset_resume)
2616 return hdev->driver->reset_resume(hdev);
2617
2618 return 0;
2619 }
2620 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2621
hid_driver_resume(struct hid_device * hdev)2622 int hid_driver_resume(struct hid_device *hdev)
2623 {
2624 if (hdev->driver && hdev->driver->resume)
2625 return hdev->driver->resume(hdev);
2626
2627 return 0;
2628 }
2629 EXPORT_SYMBOL_GPL(hid_driver_resume);
2630 #endif /* CONFIG_PM */
2631
/* A dynamically added device ID, linked into hid_driver->dyn_list and
 * protected by hid_driver->dyn_lock. */
struct hid_dynid {
	struct list_head list;
	struct hid_device_id id;
};
2636
2637 /**
2638 * new_id_store - add a new HID device ID to this driver and re-probe devices
2639 * @drv: target device driver
2640 * @buf: buffer for scanning device ID data
2641 * @count: input size
2642 *
2643 * Adds a new dynamic hid device ID to this driver,
2644 * and causes the driver to probe for all devices again.
2645 */
new_id_store(struct device_driver * drv,const char * buf,size_t count)2646 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2647 size_t count)
2648 {
2649 struct hid_driver *hdrv = to_hid_driver(drv);
2650 struct hid_dynid *dynid;
2651 __u32 bus, vendor, product;
2652 unsigned long driver_data = 0;
2653 int ret;
2654
2655 ret = sscanf(buf, "%x %x %x %lx",
2656 &bus, &vendor, &product, &driver_data);
2657 if (ret < 3)
2658 return -EINVAL;
2659
2660 dynid = kzalloc_obj(*dynid);
2661 if (!dynid)
2662 return -ENOMEM;
2663
2664 dynid->id.bus = bus;
2665 dynid->id.group = HID_GROUP_ANY;
2666 dynid->id.vendor = vendor;
2667 dynid->id.product = product;
2668 dynid->id.driver_data = driver_data;
2669
2670 spin_lock(&hdrv->dyn_lock);
2671 list_add_tail(&dynid->list, &hdrv->dyn_list);
2672 spin_unlock(&hdrv->dyn_lock);
2673
2674 ret = driver_attach(&hdrv->driver);
2675
2676 return ret ? : count;
2677 }
2678 static DRIVER_ATTR_WO(new_id);
2679
/* Per-driver sysfs attributes: currently only the "new_id" store hook. */
static struct attribute *hid_drv_attrs[] = {
	&driver_attr_new_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hid_drv);
2685
hid_free_dynids(struct hid_driver * hdrv)2686 static void hid_free_dynids(struct hid_driver *hdrv)
2687 {
2688 struct hid_dynid *dynid, *n;
2689
2690 spin_lock(&hdrv->dyn_lock);
2691 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2692 list_del(&dynid->list);
2693 kfree(dynid);
2694 }
2695 spin_unlock(&hdrv->dyn_lock);
2696 }
2697
hid_match_device(struct hid_device * hdev,struct hid_driver * hdrv)2698 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2699 struct hid_driver *hdrv)
2700 {
2701 struct hid_dynid *dynid;
2702
2703 spin_lock(&hdrv->dyn_lock);
2704 list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2705 if (hid_match_one_id(hdev, &dynid->id)) {
2706 spin_unlock(&hdrv->dyn_lock);
2707 return &dynid->id;
2708 }
2709 }
2710 spin_unlock(&hdrv->dyn_lock);
2711
2712 return hid_match_id(hdev, hdrv->id_table);
2713 }
2714 EXPORT_SYMBOL_GPL(hid_match_device);
2715
hid_bus_match(struct device * dev,const struct device_driver * drv)2716 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2717 {
2718 struct hid_driver *hdrv = to_hid_driver(drv);
2719 struct hid_device *hdev = to_hid_device(dev);
2720
2721 return hid_match_device(hdev, hdrv) != NULL;
2722 }
2723
2724 /**
2725 * hid_compare_device_paths - check if both devices share the same path
2726 * @hdev_a: hid device
2727 * @hdev_b: hid device
2728 * @separator: char to use as separator
2729 *
2730 * Check if two devices share the same path up to the last occurrence of
2731 * the separator char. Both paths must exist (i.e., zero-length paths
2732 * don't match).
2733 */
hid_compare_device_paths(struct hid_device * hdev_a,struct hid_device * hdev_b,char separator)2734 bool hid_compare_device_paths(struct hid_device *hdev_a,
2735 struct hid_device *hdev_b, char separator)
2736 {
2737 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2738 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2739
2740 if (n1 != n2 || n1 <= 0 || n2 <= 0)
2741 return false;
2742
2743 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2744 }
2745 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2746
hid_check_device_match(struct hid_device * hdev,struct hid_driver * hdrv,const struct hid_device_id ** id)2747 static bool hid_check_device_match(struct hid_device *hdev,
2748 struct hid_driver *hdrv,
2749 const struct hid_device_id **id)
2750 {
2751 *id = hid_match_device(hdev, hdrv);
2752 if (!*id)
2753 return false;
2754
2755 if (hdrv->match)
2756 return hdrv->match(hdev, hid_ignore_special_drivers);
2757
2758 /*
2759 * hid-generic implements .match(), so we must be dealing with a
2760 * different HID driver here, and can simply check if
2761 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2762 * are set or not.
2763 */
2764 return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2765 }
2766
hid_set_group(struct hid_device * hdev)2767 static void hid_set_group(struct hid_device *hdev)
2768 {
2769 int ret;
2770
2771 if (hid_ignore_special_drivers) {
2772 hdev->group = HID_GROUP_GENERIC;
2773 } else if (!hdev->group &&
2774 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2775 ret = hid_scan_report(hdev);
2776 if (ret)
2777 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2778 }
2779 }
2780
/*
 * Bind @hdrv to @hdev: refresh the HID-BPF report descriptor (rescanning
 * the group if BPF changed it), check the driver matches, open a devres
 * group and run the driver's probe (or the default open/start sequence).
 *
 * Returns 0 on success or a negative error code; on failure all devres
 * resources are released and hdev->driver is cleared.
 */
static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
{
	const struct hid_device_id *id;
	int ret;

	/* bpf_rsize == 0 means the BPF rdesc fixup has not run yet */
	if (!hdev->bpf_rsize) {
		/* we keep a reference to the currently scanned report descriptor */
		const __u8 *original_rdesc = hdev->bpf_rdesc;

		if (!original_rdesc)
			original_rdesc = hdev->dev_rdesc;

		/* in case a bpf program gets detached, we need to free the old one */
		hid_free_bpf_rdesc(hdev);

		/* keep this around so we know we called it once */
		hdev->bpf_rsize = hdev->dev_rsize;

		/* call_hid_bpf_rdesc_fixup will always return a valid pointer */
		hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
							   &hdev->bpf_rsize);

		/* the report descriptor changed, we need to re-scan it */
		if (original_rdesc != hdev->bpf_rdesc) {
			hdev->group = 0;
			hid_set_group(hdev);
		}
	}

	if (!hid_check_device_match(hdev, hdrv, &id))
		return -ENODEV;

	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
	if (!hdev->devres_group_id)
		return -ENOMEM;

	/* reset the quirks that have been previously set */
	hdev->quirks = hid_lookup_quirk(hdev);
	hdev->driver = hdrv;

	if (hdrv->probe) {
		ret = hdrv->probe(hdev, id);
	} else { /* default probe */
		ret = hid_open_report(hdev);
		if (!ret)
			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	}

	/*
	 * Note that we are not closing the devres group opened above so
	 * even resources that were attached to the device after probe is
	 * run are released when hid_device_remove() is executed. This is
	 * needed as some drivers would allocate additional resources,
	 * for example when updating firmware.
	 */

	if (ret) {
		devres_release_group(&hdev->dev, hdev->devres_group_id);
		hid_close_report(hdev);
		hdev->driver = NULL;
	}

	return ret;
}
2845
/*
 * Bus probe callback: serialize against input delivery via
 * driver_input_lock, then run __hid_device_probe() if the device is not
 * already bound. The lock is kept when the driver signalled (via
 * hid_device_io_start()) that it wants input flowing during probe.
 */
static int hid_device_probe(struct device *dev)
{
	struct hid_device *hdev = to_hid_device(dev);
	struct hid_driver *hdrv = to_hid_driver(dev->driver);
	int ret = 0;

	if (down_interruptible(&hdev->driver_input_lock))
		return -EINTR;

	hdev->io_started = false;
	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);

	if (!hdev->driver)
		ret = __hid_device_probe(hdev, hdrv);

	/* probe may have re-enabled I/O; only release the lock if not */
	if (!hdev->io_started)
		up(&hdev->driver_input_lock);

	return ret;
}
2866
/*
 * Bus remove callback: block input delivery, run the driver's remove (or
 * the default hid_hw_stop()), release the devres group opened at probe
 * time and detach the driver.
 */
static void hid_device_remove(struct device *dev)
{
	struct hid_device *hdev = to_hid_device(dev);
	struct hid_driver *hdrv;

	down(&hdev->driver_input_lock);
	hdev->io_started = false;

	hdrv = hdev->driver;
	if (hdrv) {
		if (hdrv->remove)
			hdrv->remove(hdev);
		else /* default remove */
			hid_hw_stop(hdev);

		/* Release all devres resources allocated by the driver */
		devres_release_group(&hdev->dev, hdev->devres_group_id);

		hid_close_report(hdev);
		hdev->driver = NULL;
	}

	/* remove may have re-enabled I/O; only release the lock if not */
	if (!hdev->io_started)
		up(&hdev->driver_input_lock);
}
2892
modalias_show(struct device * dev,struct device_attribute * a,char * buf)2893 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2894 char *buf)
2895 {
2896 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2897
2898 return sysfs_emit(buf, "hid:b%04Xg%04Xv%08Xp%08X\n",
2899 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2900 }
2901 static DEVICE_ATTR_RO(modalias);
2902
/* Per-device sysfs attributes: "modalias" plus the binary
 * "report_descriptor", combined into the hid_dev attribute group. */
static struct attribute *hid_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
static const struct bin_attribute *hid_dev_bin_attrs[] = {
	&bin_attr_report_descriptor,
	NULL
};
static const struct attribute_group hid_dev_group = {
	.attrs = hid_dev_attrs,
	.bin_attrs = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);
2916
hid_uevent(const struct device * dev,struct kobj_uevent_env * env)2917 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2918 {
2919 const struct hid_device *hdev = to_hid_device(dev);
2920
2921 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2922 hdev->bus, hdev->vendor, hdev->product))
2923 return -ENOMEM;
2924
2925 if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2926 return -ENOMEM;
2927
2928 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2929 return -ENOMEM;
2930
2931 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2932 return -ENOMEM;
2933
2934 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2935 hdev->bus, hdev->group, hdev->vendor, hdev->product))
2936 return -ENOMEM;
2937 if (hdev->firmware_version) {
2938 if (add_uevent_var(env, "HID_FIRMWARE_VERSION=0x%04llX",
2939 hdev->firmware_version))
2940 return -ENOMEM;
2941 }
2942
2943 return 0;
2944 }
2945
/* The HID bus: ties together matching, probing, removal and uevent
 * generation for all HID devices and drivers. */
const struct bus_type hid_bus_type = {
	.name		= "hid",
	.dev_groups	= hid_dev_groups,
	.drv_groups	= hid_drv_groups,
	.match		= hid_bus_match,
	.probe		= hid_device_probe,
	.remove		= hid_device_remove,
	.uevent		= hid_uevent,
};
EXPORT_SYMBOL(hid_bus_type);
2956
/*
 * Register a previously allocated hid_device on the HID bus: apply quirks,
 * honour the ignore list, fetch the report descriptor via the transport,
 * determine the device group, assign a unique name and add the device.
 *
 * Returns 0 on success or a negative error code.
 */
int hid_add_device(struct hid_device *hdev)
{
	/* monotonically increasing instance counter for unique dev names */
	static atomic_t id = ATOMIC_INIT(0);
	int ret;

	if (WARN_ON(hdev->status & HID_STAT_ADDED))
		return -EBUSY;

	hdev->quirks = hid_lookup_quirk(hdev);

	/* we need to kill them here, otherwise they will stay allocated to
	 * wait for coming driver */
	if (hid_ignore(hdev))
		return -ENODEV;

	/*
	 * Check for the mandatory transport channel.
	 */
	if (!hdev->ll_driver->raw_request) {
		hid_err(hdev, "transport driver missing .raw_request()\n");
		return -EINVAL;
	}

	/*
	 * Read the device report descriptor once and use as template
	 * for the driver-specific modifications.
	 */
	ret = hdev->ll_driver->parse(hdev);
	if (ret)
		return ret;
	if (!hdev->dev_rdesc)
		return -ENODEV;

	/*
	 * Scan generic devices for group information
	 */
	hid_set_group(hdev);

	hdev->id = atomic_inc_return(&id);

	/* XXX hack, any other cleaner solution after the driver core
	 * is converted to allow more than 20 bytes as the device name? */
	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
		     hdev->vendor, hdev->product, hdev->id);

	/* debugfs entry must exist before the device goes live */
	hid_debug_register(hdev, dev_name(&hdev->dev));
	ret = device_add(&hdev->dev);
	if (!ret)
		hdev->status |= HID_STAT_ADDED;
	else
		hid_debug_unregister(hdev);

	return ret;
}
EXPORT_SYMBOL_GPL(hid_add_device);
3012
3013 /**
3014 * hid_allocate_device - allocate new hid device descriptor
3015 *
3016 * Allocate and initialize hid device, so that hid_destroy_device might be
3017 * used to free it.
3018 *
3019 * New hid_device pointer is returned on success, otherwise ERR_PTR encoded
3020 * error value.
3021 */
struct hid_device *hid_allocate_device(void)
{
	struct hid_device *hdev;
	int ret = -ENOMEM;

	hdev = kzalloc_obj(*hdev);
	if (hdev == NULL)
		return ERR_PTR(ret);

	device_initialize(&hdev->dev);
	hdev->dev.release = hid_device_release;
	hdev->dev.bus = &hid_bus_type;
	device_enable_async_suspend(&hdev->dev);

	/*
	 * NOTE(review): hid_close_report() on a freshly zeroed device
	 * appears to double as report-structure initialization — confirm.
	 */
	hid_close_report(hdev);

	init_waitqueue_head(&hdev->debug_wait);
	INIT_LIST_HEAD(&hdev->debug_list);
	spin_lock_init(&hdev->debug_list_lock);
	sema_init(&hdev->driver_input_lock, 1);
	mutex_init(&hdev->ll_open_lock);
	kref_init(&hdev->ref);

#ifdef CONFIG_HID_BATTERY_STRENGTH
	INIT_LIST_HEAD(&hdev->batteries);
#endif

	ret = hid_bpf_device_init(hdev);
	if (ret)
		goto out_err;

	return hdev;

out_err:
	/* releases the device reference taken by device_initialize() */
	hid_destroy_device(hdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hid_allocate_device);
3060
/*
 * Unregister the device (if it was added) and free both report
 * descriptors; resetting the sizes keeps the state consistent for a
 * possible later re-add.
 */
static void hid_remove_device(struct hid_device *hdev)
{
	if (hdev->status & HID_STAT_ADDED) {
		device_del(&hdev->dev);
		hid_debug_unregister(hdev);
		hdev->status &= ~HID_STAT_ADDED;
	}
	hid_free_bpf_rdesc(hdev);
	kfree(hdev->dev_rdesc);
	hdev->dev_rdesc = NULL;
	hdev->dev_rsize = 0;
	hdev->bpf_rsize = 0;
}
3074
3075 /**
3076 * hid_destroy_device - free previously allocated device
3077 *
3078 * @hdev: hid device
3079 *
 * If you allocate hid_device through hid_allocate_device, you should only
 * ever free it with this function.
3082 */
void hid_destroy_device(struct hid_device *hdev)
{
	hid_bpf_destroy_device(hdev);
	hid_remove_device(hdev);
	/* drops the reference taken at allocation; the release callback
	 * frees the structure once the last reference is gone */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL_GPL(hid_destroy_device);
3090
3091
__hid_bus_reprobe_drivers(struct device * dev,void * data)3092 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
3093 {
3094 struct hid_driver *hdrv = data;
3095 struct hid_device *hdev = to_hid_device(dev);
3096
3097 if (hdev->driver == hdrv &&
3098 !hdrv->match(hdev, hid_ignore_special_drivers) &&
3099 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
3100 return device_reprobe(dev);
3101
3102 return 0;
3103 }
3104
__hid_bus_driver_added(struct device_driver * drv,void * data)3105 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3106 {
3107 struct hid_driver *hdrv = to_hid_driver(drv);
3108
3109 if (hdrv->match) {
3110 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3111 __hid_bus_reprobe_drivers);
3112 }
3113
3114 return 0;
3115 }
3116
/*
 * bus_for_each_drv() callback run when a driver is removed: rescan all
 * devices on the HID bus so unbound devices can fall back to another
 * (typically the generic) driver.
 */
static int __bus_removed_driver(struct device_driver *drv, void *data)
{
	return bus_rescan_devices(&hid_bus_type);
}
3121
__hid_register_driver(struct hid_driver * hdrv,struct module * owner,const char * mod_name)3122 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3123 const char *mod_name)
3124 {
3125 int ret;
3126
3127 hdrv->driver.name = hdrv->name;
3128 hdrv->driver.bus = &hid_bus_type;
3129 hdrv->driver.owner = owner;
3130 hdrv->driver.mod_name = mod_name;
3131
3132 INIT_LIST_HEAD(&hdrv->dyn_list);
3133 spin_lock_init(&hdrv->dyn_lock);
3134
3135 ret = driver_register(&hdrv->driver);
3136
3137 if (ret == 0)
3138 bus_for_each_drv(&hid_bus_type, NULL, NULL,
3139 __hid_bus_driver_added);
3140
3141 return ret;
3142 }
3143 EXPORT_SYMBOL_GPL(__hid_register_driver);
3144
/*
 * Unregister @hdrv from the HID bus.
 *
 * Order matters: the driver is unregistered first, then its dynamically
 * added device ids are freed, and finally the remaining devices on the
 * bus are rescanned so another driver can pick them up.
 */
void hid_unregister_driver(struct hid_driver *hdrv)
{
	driver_unregister(&hdrv->driver);
	hid_free_dynids(hdrv);

	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
3153
hid_check_keys_pressed(struct hid_device * hid)3154 int hid_check_keys_pressed(struct hid_device *hid)
3155 {
3156 struct hid_input *hidinput;
3157 int i;
3158
3159 if (!(hid->claimed & HID_CLAIMED_INPUT))
3160 return 0;
3161
3162 list_for_each_entry(hidinput, &hid->inputs, list) {
3163 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3164 if (hidinput->input->key[i])
3165 return 1;
3166 }
3167
3168 return 0;
3169 }
3170 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3171
#ifdef CONFIG_HID_BPF
/*
 * Operations table handed to the HID-BPF layer (via the hid_ops pointer
 * set in hid_init()), giving it access to the core transport entry
 * points without a hard module dependency.
 */
static const struct hid_ops __hid_ops = {
	.hid_get_report = hid_get_report,
	.hid_hw_raw_request = __hid_hw_raw_request,
	.hid_hw_output_report = __hid_hw_output_report,
	.hid_input_report = __hid_input_report,
	.owner = THIS_MODULE,
	.bus_type = &hid_bus_type,
};
#endif
3182
hid_init(void)3183 static int __init hid_init(void)
3184 {
3185 int ret;
3186
3187 ret = bus_register(&hid_bus_type);
3188 if (ret) {
3189 pr_err("can't register hid bus\n");
3190 goto err;
3191 }
3192
3193 #ifdef CONFIG_HID_BPF
3194 hid_ops = &__hid_ops;
3195 #endif
3196
3197 ret = hidraw_init();
3198 if (ret)
3199 goto err_bus;
3200
3201 hid_debug_init();
3202
3203 return 0;
3204 err_bus:
3205 bus_unregister(&hid_bus_type);
3206 err:
3207 return ret;
3208 }
3209
/*
 * Module exit: tear down in reverse of hid_init() — detach the HID-BPF
 * ops, remove debugfs, shut down hidraw, unregister the bus — then drop
 * any dynamically added quirks for all bus types.
 */
static void __exit hid_exit(void)
{
#ifdef CONFIG_HID_BPF
	hid_ops = NULL;
#endif
	hid_debug_exit();
	hidraw_exit();
	bus_unregister(&hid_bus_type);
	hid_quirks_exit(HID_BUS_ANY);
}
3220
/* Module entry/exit points and metadata. */
module_init(hid_init);
module_exit(hid_exit);

MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_DESCRIPTION("HID support for Linux");
MODULE_LICENSE("GPL");
3229