1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * HID support for Linux
4 *
5 * Copyright (c) 1999 Andreas Gal
6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8 * Copyright (c) 2006-2012 Jiri Kosina
9 */
10
11 /*
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/mm.h>
22 #include <linux/spinlock.h>
23 #include <linux/unaligned.h>
24 #include <asm/byteorder.h>
25 #include <linux/input.h>
26 #include <linux/wait.h>
27 #include <linux/vmalloc.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30
31 #include <linux/hid.h>
32 #include <linux/hiddev.h>
33 #include <linux/hid-debug.h>
34 #include <linux/hidraw.h>
35
36 #include "hid-ids.h"
37
38 /*
39 * Version Information
40 */
41
42 #define DRIVER_DESC "HID core driver"
43
44 static int hid_ignore_special_drivers = 0;
45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
47
48 /*
49 * Convert a signed n-bit integer to signed 32-bit integer.
50 */
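/*
 * For example, snto32(0xff, 8) sign-extends the 8-bit value 0xff to -1,
 * while snto32(0x7f, 8) stays 127.
 */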
51
52 static s32 snto32(__u32 value, unsigned int n)
53 {
54 if (!value || !n)
55 return 0;
56
57 if (n > 32)
58 n = 32;
59
60 return sign_extend32(value, n - 1);
61 }
62
63 /*
64 * Convert a signed 32-bit integer to a signed n-bit integer.
65 */
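/*
 * Out-of-range values are saturated: for example, s32ton(300, 8) yields
 * 0x7f and s32ton(-300, 8) yields 0x80, while an in-range value such as
 * -1 is simply truncated to 0xff.
 */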
66
67 static u32 s32ton(__s32 value, unsigned int n)
68 {
69 s32 a;
70
71 if (!value || !n)
72 return 0;
73
74 a = value >> (n - 1);
75 if (a && a != -1)
76 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
77 return value & ((1 << n) - 1);
78 }
79
80 /*
81 * Register a new report for a device.
82 */
83
84 struct hid_report *hid_register_report(struct hid_device *device,
85 enum hid_report_type type, unsigned int id,
86 unsigned int application)
87 {
88 struct hid_report_enum *report_enum = device->report_enum + type;
89 struct hid_report *report;
90
91 if (id >= HID_MAX_IDS)
92 return NULL;
93 if (report_enum->report_id_hash[id])
94 return report_enum->report_id_hash[id];
95
96 report = kzalloc_obj(struct hid_report, GFP_KERNEL);
97 if (!report)
98 return NULL;
99
100 if (id != 0)
101 report_enum->numbered = 1;
102
103 report->id = id;
104 report->type = type;
105 report->size = 0;
106 report->device = device;
107 report->application = application;
108 report_enum->report_id_hash[id] = report;
109
110 list_add_tail(&report->list, &report_enum->report_list);
111 INIT_LIST_HEAD(&report->field_entry_list);
112
113 return report;
114 }
115 EXPORT_SYMBOL_GPL(hid_register_report);
116
117 /*
118 * Register a new field for this report.
119 */
120
121 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
122 {
123 struct hid_field *field;
124
125 if (report->maxfield == HID_MAX_FIELDS) {
126 hid_err(report->device, "too many fields in report\n");
127 return NULL;
128 }
129
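/*
 * A single allocation holds the hid_field structure followed by the
 * usage array and the per-usage value, new_value and usages_priorities
 * arrays that are carved out right below.
 */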
130 field = kvzalloc((sizeof(struct hid_field) +
131 usages * sizeof(struct hid_usage) +
132 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
133 if (!field)
134 return NULL;
135
136 field->index = report->maxfield++;
137 report->field[field->index] = field;
138 field->usage = (struct hid_usage *)(field + 1);
139 field->value = (s32 *)(field->usage + usages);
140 field->new_value = (s32 *)(field->value + usages);
141 field->usages_priorities = (s32 *)(field->new_value + usages);
142 field->report = report;
143
144 return field;
145 }
146
147 /*
148 * Open a collection. The type/usage is pushed on the stack.
149 */
150
151 static int open_collection(struct hid_parser *parser, unsigned type)
152 {
153 struct hid_collection *collection;
154 unsigned usage;
155 int collection_index;
156
157 usage = parser->local.usage[0];
158
159 if (parser->collection_stack_ptr == parser->collection_stack_size) {
160 unsigned int *collection_stack;
161 unsigned int new_size = parser->collection_stack_size +
162 HID_COLLECTION_STACK_SIZE;
163
164 collection_stack = krealloc(parser->collection_stack,
165 new_size * sizeof(unsigned int),
166 GFP_KERNEL);
167 if (!collection_stack)
168 return -ENOMEM;
169
170 parser->collection_stack = collection_stack;
171 parser->collection_stack_size = new_size;
172 }
173
174 if (parser->device->maxcollection == parser->device->collection_size) {
175 collection = kmalloc(
176 array3_size(sizeof(struct hid_collection),
177 parser->device->collection_size,
178 2),
179 GFP_KERNEL);
180 if (collection == NULL) {
181 hid_err(parser->device, "failed to reallocate collection array\n");
182 return -ENOMEM;
183 }
184 memcpy(collection, parser->device->collection,
185 sizeof(struct hid_collection) *
186 parser->device->collection_size);
187 memset(collection + parser->device->collection_size, 0,
188 sizeof(struct hid_collection) *
189 parser->device->collection_size);
190 kfree(parser->device->collection);
191 parser->device->collection = collection;
192 parser->device->collection_size *= 2;
193 }
194
195 parser->collection_stack[parser->collection_stack_ptr++] =
196 parser->device->maxcollection;
197
198 collection_index = parser->device->maxcollection++;
199 collection = parser->device->collection + collection_index;
200 collection->type = type;
201 collection->usage = usage;
202 collection->level = parser->collection_stack_ptr - 1;
203 collection->parent_idx = (collection->level == 0) ? -1 :
204 parser->collection_stack[collection->level - 1];
205
206 if (type == HID_COLLECTION_APPLICATION)
207 parser->device->maxapplication++;
208
209 return 0;
210 }
211
212 /*
213 * Close a collection.
214 */
215
216 static int close_collection(struct hid_parser *parser)
217 {
218 if (!parser->collection_stack_ptr) {
219 hid_err(parser->device, "collection stack underflow\n");
220 return -EINVAL;
221 }
222 parser->collection_stack_ptr--;
223 return 0;
224 }
225
226 /*
227 * Climb up the stack, search for the specified collection type
228 * and return the usage.
229 */
230
231 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
232 {
233 struct hid_collection *collection = parser->device->collection;
234 int n;
235
236 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
237 unsigned index = parser->collection_stack[n];
238 if (collection[index].type == type)
239 return collection[index].usage;
240 }
241 return 0; /* we know nothing about this usage type */
242 }
243
244 /*
245 * Concatenate usage which defines 16 bits or less with the
246 * currently defined usage page to form a 32 bit usage
247 */
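/*
 * For example, with the current Usage Page 0x0001 (Generic Desktop), a
 * 16-bit usage 0x0030 (X) is completed to the 32-bit usage 0x00010030.
 */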
248
249 static void complete_usage(struct hid_parser *parser, unsigned int index)
250 {
251 parser->local.usage[index] &= 0xFFFF;
252 parser->local.usage[index] |=
253 (parser->global.usage_page & 0xFFFF) << 16;
254 }
255
256 /*
257 * Add a usage to the temporary parser table.
258 */
259
260 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
261 {
262 if (parser->local.usage_index >= HID_MAX_USAGES) {
263 hid_err(parser->device, "usage index exceeded\n");
264 return -1;
265 }
266 parser->local.usage[parser->local.usage_index] = usage;
267
268 /*
269 * If Usage item only includes usage id, concatenate it with
270 * currently defined usage page
271 */
272 if (size <= 2)
273 complete_usage(parser, parser->local.usage_index);
274
275 parser->local.usage_size[parser->local.usage_index] = size;
276 parser->local.collection_index[parser->local.usage_index] =
277 parser->collection_stack_ptr ?
278 parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
279 parser->local.usage_index++;
280 return 0;
281 }
282
283 /*
284 * Register a new field for this report.
285 */
286
287 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
288 {
289 struct hid_report *report;
290 struct hid_field *field;
291 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
292 unsigned int usages;
293 unsigned int offset;
294 unsigned int i;
295 unsigned int application;
296
297 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
298
299 report = hid_register_report(parser->device, report_type,
300 parser->global.report_id, application);
301 if (!report) {
302 hid_err(parser->device, "hid_register_report failed\n");
303 return -1;
304 }
305
306 /* Handle both signed and unsigned cases properly */
307 if ((parser->global.logical_minimum < 0 &&
308 parser->global.logical_maximum <
309 parser->global.logical_minimum) ||
310 (parser->global.logical_minimum >= 0 &&
311 (__u32)parser->global.logical_maximum <
312 (__u32)parser->global.logical_minimum)) {
313 dbg_hid("logical range invalid 0x%x 0x%x\n",
314 parser->global.logical_minimum,
315 parser->global.logical_maximum);
316 return -1;
317 }
318
319 offset = report->size;
320 report->size += parser->global.report_size * parser->global.report_count;
321
322 if (parser->device->ll_driver->max_buffer_size)
323 max_buffer_size = parser->device->ll_driver->max_buffer_size;
324
325 /* Total size check: Allow for possible report index byte */
326 if (report->size > (max_buffer_size - 1) << 3) {
327 hid_err(parser->device, "report is too long\n");
328 return -1;
329 }
330
331 if (!parser->local.usage_index) /* Ignore padding fields */
332 return 0;
333
334 usages = max_t(unsigned, parser->local.usage_index,
335 parser->global.report_count);
336
337 field = hid_register_field(report, usages);
338 if (!field)
339 return 0;
340
341 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
342 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
343 field->application = application;
344
345 for (i = 0; i < usages; i++) {
346 unsigned j = i;
347 /* Duplicate the last usage we parsed if we have excess values */
348 if (i >= parser->local.usage_index)
349 j = parser->local.usage_index - 1;
350 field->usage[i].hid = parser->local.usage[j];
351 field->usage[i].collection_index =
352 parser->local.collection_index[j];
353 field->usage[i].usage_index = i;
354 field->usage[i].resolution_multiplier = 1;
355 }
356
357 field->maxusage = usages;
358 field->flags = flags;
359 field->report_offset = offset;
360 field->report_type = report_type;
361 field->report_size = parser->global.report_size;
362 field->report_count = parser->global.report_count;
363 field->logical_minimum = parser->global.logical_minimum;
364 field->logical_maximum = parser->global.logical_maximum;
365 field->physical_minimum = parser->global.physical_minimum;
366 field->physical_maximum = parser->global.physical_maximum;
367 field->unit_exponent = parser->global.unit_exponent;
368 field->unit = parser->global.unit;
369
370 return 0;
371 }
372
373 /*
374 * Read data value from item.
375 */
376
377 static u32 item_udata(struct hid_item *item)
378 {
379 switch (item->size) {
380 case 1: return item->data.u8;
381 case 2: return item->data.u16;
382 case 4: return item->data.u32;
383 }
384 return 0;
385 }
386
387 static s32 item_sdata(struct hid_item *item)
388 {
389 switch (item->size) {
390 case 1: return item->data.s8;
391 case 2: return item->data.s16;
392 case 4: return item->data.s32;
393 }
394 return 0;
395 }
396
397 /*
398 * Process a global item.
399 */
400
401 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
402 {
403 __s32 raw_value;
404 switch (item->tag) {
405 case HID_GLOBAL_ITEM_TAG_PUSH:
406
407 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
408 hid_err(parser->device, "global environment stack overflow\n");
409 return -1;
410 }
411
412 memcpy(parser->global_stack + parser->global_stack_ptr++,
413 &parser->global, sizeof(struct hid_global));
414 return 0;
415
416 case HID_GLOBAL_ITEM_TAG_POP:
417
418 if (!parser->global_stack_ptr) {
419 hid_err(parser->device, "global environment stack underflow\n");
420 return -1;
421 }
422
423 memcpy(&parser->global, parser->global_stack +
424 --parser->global_stack_ptr, sizeof(struct hid_global));
425 return 0;
426
427 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
428 parser->global.usage_page = item_udata(item);
429 return 0;
430
431 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
432 parser->global.logical_minimum = item_sdata(item);
433 return 0;
434
435 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
436 if (parser->global.logical_minimum < 0)
437 parser->global.logical_maximum = item_sdata(item);
438 else
439 parser->global.logical_maximum = item_udata(item);
440 return 0;
441
442 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
443 parser->global.physical_minimum = item_sdata(item);
444 return 0;
445
446 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
447 if (parser->global.physical_minimum < 0)
448 parser->global.physical_maximum = item_sdata(item);
449 else
450 parser->global.physical_maximum = item_udata(item);
451 return 0;
452
453 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
454 /* Many devices provide unit exponent as a two's complement
455 * nibble due to the common misunderstanding of HID
456 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
457 * both this and the standard encoding. */
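/* For example, a raw item value of 0x0f is taken as exponent -1. */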
458 raw_value = item_sdata(item);
459 if (!(raw_value & 0xfffffff0))
460 parser->global.unit_exponent = snto32(raw_value, 4);
461 else
462 parser->global.unit_exponent = raw_value;
463 return 0;
464
465 case HID_GLOBAL_ITEM_TAG_UNIT:
466 parser->global.unit = item_udata(item);
467 return 0;
468
469 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
470 parser->global.report_size = item_udata(item);
471 if (parser->global.report_size > 256) {
472 hid_err(parser->device, "invalid report_size %d\n",
473 parser->global.report_size);
474 return -1;
475 }
476 return 0;
477
478 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
479 parser->global.report_count = item_udata(item);
480 if (parser->global.report_count > HID_MAX_USAGES) {
481 hid_err(parser->device, "invalid report_count %d\n",
482 parser->global.report_count);
483 return -1;
484 }
485 return 0;
486
487 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
488 parser->global.report_id = item_udata(item);
489 if (parser->global.report_id == 0 ||
490 parser->global.report_id >= HID_MAX_IDS) {
491 hid_err(parser->device, "report_id %u is invalid\n",
492 parser->global.report_id);
493 return -1;
494 }
495 return 0;
496
497 default:
498 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
499 return -1;
500 }
501 }
502
503 /*
504 * Process a local item.
505 */
506
507 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
508 {
509 __u32 data;
510 unsigned n;
511 __u32 count;
512
513 data = item_udata(item);
514
515 switch (item->tag) {
516 case HID_LOCAL_ITEM_TAG_DELIMITER:
517
518 if (data) {
519 /*
520 * We treat items before the first delimiter
521 * as global to all usage sets (branch 0).
522 * At the moment we process only these global
523 * items and the first delimiter set.
524 */
525 if (parser->local.delimiter_depth != 0) {
526 hid_err(parser->device, "nested delimiters\n");
527 return -1;
528 }
529 parser->local.delimiter_depth++;
530 parser->local.delimiter_branch++;
531 } else {
532 if (parser->local.delimiter_depth < 1) {
533 hid_err(parser->device, "bogus close delimiter\n");
534 return -1;
535 }
536 parser->local.delimiter_depth--;
537 }
538 return 0;
539
540 case HID_LOCAL_ITEM_TAG_USAGE:
541
542 if (parser->local.delimiter_branch > 1) {
543 dbg_hid("alternative usage ignored\n");
544 return 0;
545 }
546
547 return hid_add_usage(parser, data, item->size);
548
549 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
550
551 if (parser->local.delimiter_branch > 1) {
552 dbg_hid("alternative usage ignored\n");
553 return 0;
554 }
555
556 parser->local.usage_minimum = data;
557 return 0;
558
559 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
560
561 if (parser->local.delimiter_branch > 1) {
562 dbg_hid("alternative usage ignored\n");
563 return 0;
564 }
565
566 count = data - parser->local.usage_minimum;
567 if (count + parser->local.usage_index >= HID_MAX_USAGES) {
568 /*
569 * We do not warn if the name is not set; we are
570 * actually pre-scanning the device.
571 */
572 if (dev_name(&parser->device->dev))
573 hid_warn(parser->device,
574 "ignoring exceeding usage max\n");
575 data = HID_MAX_USAGES - parser->local.usage_index +
576 parser->local.usage_minimum - 1;
577 if (data <= 0) {
578 hid_err(parser->device,
579 "no more usage index available\n");
580 return -1;
581 }
582 }
583
584 for (n = parser->local.usage_minimum; n <= data; n++)
585 if (hid_add_usage(parser, n, item->size)) {
586 dbg_hid("hid_add_usage failed\n");
587 return -1;
588 }
589 return 0;
590
591 default:
592
593 dbg_hid("unknown local item tag 0x%x\n", item->tag);
594 return 0;
595 }
596 return 0;
597 }
598
599 /*
600 * Concatenate Usage Pages into Usages where relevant:
601 * As per specification, 6.2.2.8: "When the parser encounters a main item it
602 * concatenates the last declared Usage Page with a Usage to form a complete
603 * usage value."
604 */
605
606 static void hid_concatenate_last_usage_page(struct hid_parser *parser)
607 {
608 int i;
609 unsigned int usage_page;
610 unsigned int current_page;
611
612 if (!parser->local.usage_index)
613 return;
614
615 usage_page = parser->global.usage_page;
616
617 /*
618 * Concatenate usage page again only if last declared Usage Page
619 * has not been already used in previous usages concatenation
620 */
621 for (i = parser->local.usage_index - 1; i >= 0; i--) {
622 if (parser->local.usage_size[i] > 2)
623 /* Ignore extended usages */
624 continue;
625
626 current_page = parser->local.usage[i] >> 16;
627 if (current_page == usage_page)
628 break;
629
630 complete_usage(parser, i);
631 }
632 }
633
634 /*
635 * Process a main item.
636 */
637
638 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
639 {
640 __u32 data;
641 int ret;
642
643 hid_concatenate_last_usage_page(parser);
644
645 data = item_udata(item);
646
647 switch (item->tag) {
648 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
649 ret = open_collection(parser, data & 0xff);
650 break;
651 case HID_MAIN_ITEM_TAG_END_COLLECTION:
652 ret = close_collection(parser);
653 break;
654 case HID_MAIN_ITEM_TAG_INPUT:
655 ret = hid_add_field(parser, HID_INPUT_REPORT, data);
656 break;
657 case HID_MAIN_ITEM_TAG_OUTPUT:
658 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
659 break;
660 case HID_MAIN_ITEM_TAG_FEATURE:
661 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
662 break;
663 default:
664 if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
665 item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
666 hid_warn_ratelimited(parser->device, "reserved main item tag 0x%x\n", item->tag);
667 else
668 hid_warn_ratelimited(parser->device, "unknown main item tag 0x%x\n", item->tag);
669 ret = 0;
670 }
671
672 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
673
674 return ret;
675 }
676
677 /*
678 * Process a reserved item.
679 */
680
681 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
682 {
683 dbg_hid("reserved item type, tag 0x%x\n", item->tag);
684 return 0;
685 }
686
687 /*
688 * Free a report and all registered fields. The field->usage and
689 * field->value tables are allocated behind the field, so we need
690 * only to free(field) itself.
691 */
692
693 static void hid_free_report(struct hid_report *report)
694 {
695 unsigned n;
696
697 kfree(report->field_entries);
698
699 for (n = 0; n < report->maxfield; n++)
700 kvfree(report->field[n]);
701 kfree(report);
702 }
703
704 /*
705 * Close report. This function returns the device
706 * state to the point prior to hid_open_report().
707 */
708 static void hid_close_report(struct hid_device *device)
709 {
710 unsigned i, j;
711
712 for (i = 0; i < HID_REPORT_TYPES; i++) {
713 struct hid_report_enum *report_enum = device->report_enum + i;
714
715 for (j = 0; j < HID_MAX_IDS; j++) {
716 struct hid_report *report = report_enum->report_id_hash[j];
717 if (report)
718 hid_free_report(report);
719 }
720 memset(report_enum, 0, sizeof(*report_enum));
721 INIT_LIST_HEAD(&report_enum->report_list);
722 }
723
724 /*
725 * If the HID driver had a rdesc_fixup() callback, dev->rdesc
726 * will be allocated by hid-core and needs to be freed.
727 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
728 * which cases it'll be freed later on device removal or destroy.
729 */
730 if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
731 kfree(device->rdesc);
732 device->rdesc = NULL;
733 device->rsize = 0;
734
735 kfree(device->collection);
736 device->collection = NULL;
737 device->collection_size = 0;
738 device->maxcollection = 0;
739 device->maxapplication = 0;
740
741 device->status &= ~HID_STAT_PARSED;
742 }
743
744 static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
745 {
746 /* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
747 if (hdev->bpf_rdesc != hdev->dev_rdesc)
748 kfree(hdev->bpf_rdesc);
749 hdev->bpf_rdesc = NULL;
750 }
751
752 /*
753 * Free a device structure, all reports, and all fields.
754 */
755
756 void hiddev_free(struct kref *ref)
757 {
758 struct hid_device *hid = container_of(ref, struct hid_device, ref);
759
760 hid_close_report(hid);
761 hid_free_bpf_rdesc(hid);
762 kfree(hid->dev_rdesc);
763 kfree(hid);
764 }
765
766 static void hid_device_release(struct device *dev)
767 {
768 struct hid_device *hid = to_hid_device(dev);
769
770 kref_put(&hid->ref, hiddev_free);
771 }
772
773 /*
774 * Fetch a report description item from the data stream. We support long
775 * items, though they are not used yet.
776 */
777
778 static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
779 {
780 u8 b;
781
782 if ((end - start) <= 0)
783 return NULL;
784
785 b = *start++;
786
787 item->type = (b >> 2) & 3;
788 item->tag = (b >> 4) & 15;
789
790 if (item->tag == HID_ITEM_TAG_LONG) {
791
792 item->format = HID_ITEM_FORMAT_LONG;
793
794 if ((end - start) < 2)
795 return NULL;
796
797 item->size = *start++;
798 item->tag = *start++;
799
800 if ((end - start) < item->size)
801 return NULL;
802
803 item->data.longdata = start;
804 start += item->size;
805 return start;
806 }
807
808 item->format = HID_ITEM_FORMAT_SHORT;
809 item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */
810
811 if (end - start < item->size)
812 return NULL;
813
814 switch (item->size) {
815 case 0:
816 break;
817
818 case 1:
819 item->data.u8 = *start;
820 break;
821
822 case 2:
823 item->data.u16 = get_unaligned_le16(start);
824 break;
825
826 case 4:
827 item->data.u32 = get_unaligned_le32(start);
828 break;
829 }
830
831 return start + item->size;
832 }
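/*
 * Short-item example: the prefix byte 0x05 encodes size 1, type GLOBAL and
 * tag 0 (Usage Page), so the byte sequence 0x05 0x01 is parsed as
 * "Usage Page (Generic Desktop)".
 */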
833
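/*
 * The hid_scan_*() helpers below run during the pre-scan of the report
 * descriptor (see hid_scan_report()) and only record hints, such as the
 * HID group and scan flags, that are later used to pick a driver.
 */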
834 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
835 {
836 struct hid_device *hid = parser->device;
837
838 if (usage == HID_DG_CONTACTID)
839 hid->group = HID_GROUP_MULTITOUCH;
840 }
841
842 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
843 {
844 if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
845 parser->global.report_size == 8)
846 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
847
848 if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
849 parser->global.report_size == 8)
850 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
851 }
852
853 static void hid_scan_collection(struct hid_parser *parser, unsigned type)
854 {
855 struct hid_device *hid = parser->device;
856 int i;
857
858 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
859 (type == HID_COLLECTION_PHYSICAL ||
860 type == HID_COLLECTION_APPLICATION))
861 hid->group = HID_GROUP_SENSOR_HUB;
862
863 if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
864 hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
865 hid->group == HID_GROUP_MULTITOUCH)
866 hid->group = HID_GROUP_GENERIC;
867
868 if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
869 for (i = 0; i < parser->local.usage_index; i++)
870 if (parser->local.usage[i] == HID_GD_POINTER)
871 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
872
873 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
874 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
875
876 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
877 for (i = 0; i < parser->local.usage_index; i++)
878 if (parser->local.usage[i] ==
879 (HID_UP_GOOGLEVENDOR | 0x0001))
880 parser->device->group =
881 HID_GROUP_VIVALDI;
882 }
883
884 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
885 {
886 __u32 data;
887 int i;
888
889 hid_concatenate_last_usage_page(parser);
890
891 data = item_udata(item);
892
893 switch (item->tag) {
894 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
895 hid_scan_collection(parser, data & 0xff);
896 break;
897 case HID_MAIN_ITEM_TAG_END_COLLECTION:
898 break;
899 case HID_MAIN_ITEM_TAG_INPUT:
900 /* ignore constant inputs, they will be ignored by hid-input */
901 if (data & HID_MAIN_ITEM_CONSTANT)
902 break;
903 for (i = 0; i < parser->local.usage_index; i++)
904 hid_scan_input_usage(parser, parser->local.usage[i]);
905 break;
906 case HID_MAIN_ITEM_TAG_OUTPUT:
907 break;
908 case HID_MAIN_ITEM_TAG_FEATURE:
909 for (i = 0; i < parser->local.usage_index; i++)
910 hid_scan_feature_usage(parser, parser->local.usage[i]);
911 break;
912 }
913
914 /* Reset the local parser environment */
915 memset(&parser->local, 0, sizeof(parser->local));
916
917 return 0;
918 }
919
920 /*
921 * Scan a report descriptor before the device is added to the bus.
922 * Sets device groups and other properties that determine what driver
923 * to load.
924 */
925 static int hid_scan_report(struct hid_device *hid)
926 {
927 struct hid_parser *parser;
928 struct hid_item item;
929 const __u8 *start = hid->dev_rdesc;
930 const __u8 *end = start + hid->dev_rsize;
931 static int (*dispatch_type[])(struct hid_parser *parser,
932 struct hid_item *item) = {
933 hid_scan_main,
934 hid_parser_global,
935 hid_parser_local,
936 hid_parser_reserved
937 };
938
939 parser = vzalloc(sizeof(struct hid_parser));
940 if (!parser)
941 return -ENOMEM;
942
943 parser->device = hid;
944 hid->group = HID_GROUP_GENERIC;
945
946 /*
947 * In case we are re-scanning after a BPF has been loaded,
948 * we need to use the bpf report descriptor, not the original one.
949 */
950 if (hid->bpf_rdesc && hid->bpf_rsize) {
951 start = hid->bpf_rdesc;
952 end = start + hid->bpf_rsize;
953 }
954
955 /*
956 * The parsing is simpler than the one in hid_open_report() as we should
957 * be robust against hid errors. Those errors will be raised by
958 * hid_open_report() anyway.
959 */
960 while ((start = fetch_item(start, end, &item)) != NULL)
961 dispatch_type[item.type](parser, &item);
962
963 /*
964 * Handle special flags set during scanning.
965 */
966 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
967 (hid->group == HID_GROUP_MULTITOUCH))
968 hid->group = HID_GROUP_MULTITOUCH_WIN_8;
969
970 /*
971 * Vendor specific handlings
972 */
973 switch (hid->vendor) {
974 case USB_VENDOR_ID_WACOM:
975 hid->group = HID_GROUP_WACOM;
976 break;
977 case USB_VENDOR_ID_SYNAPTICS:
978 if (hid->group == HID_GROUP_GENERIC)
979 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
980 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
981 /*
982 * hid-rmi should take care of them,
983 * not hid-generic
984 */
985 hid->group = HID_GROUP_RMI;
986 break;
987 }
988
989 kfree(parser->collection_stack);
990 vfree(parser);
991 return 0;
992 }
993
994 /**
995 * hid_parse_report - parse device report
996 *
997 * @hid: hid device
998 * @start: report start
999 * @size: report size
1000 *
1001 * Allocate the device report as read by the bus driver. This function should
1002 * only be called from parse() in ll drivers.
1003 */
1004 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
1005 {
1006 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
1007 if (!hid->dev_rdesc)
1008 return -ENOMEM;
1009 hid->dev_rsize = size;
1010 return 0;
1011 }
1012 EXPORT_SYMBOL_GPL(hid_parse_report);
1013
1014 static const char * const hid_report_names[] = {
1015 "HID_INPUT_REPORT",
1016 "HID_OUTPUT_REPORT",
1017 "HID_FEATURE_REPORT",
1018 };
1019 /**
1020 * hid_validate_values - validate existing device report's value indexes
1021 *
1022 * @hid: hid device
1023 * @type: which report type to examine
1024 * @id: which report ID to examine (0 for first)
1025 * @field_index: which report field to examine
1026 * @report_counts: expected number of values
1027 *
1028 * Validate the number of values in a given field of a given report, after
1029 * parsing.
1030 */
1031 struct hid_report *hid_validate_values(struct hid_device *hid,
1032 enum hid_report_type type, unsigned int id,
1033 unsigned int field_index,
1034 unsigned int report_counts)
1035 {
1036 struct hid_report *report;
1037
1038 if (type > HID_FEATURE_REPORT) {
1039 hid_err(hid, "invalid HID report type %u\n", type);
1040 return NULL;
1041 }
1042
1043 if (id >= HID_MAX_IDS) {
1044 hid_err(hid, "invalid HID report id %u\n", id);
1045 return NULL;
1046 }
1047
1048 /*
1049 * Explicitly not using hid_get_report() here since it depends on
1050 * ->numbered being checked, which may not always be the case when
1051 * drivers go to access report values.
1052 */
1053 if (id == 0) {
1054 /*
1055 * Validating on id 0 means we should examine the first
1056 * report in the list.
1057 */
1058 report = list_first_entry_or_null(
1059 &hid->report_enum[type].report_list,
1060 struct hid_report, list);
1061 } else {
1062 report = hid->report_enum[type].report_id_hash[id];
1063 }
1064 if (!report) {
1065 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1066 return NULL;
1067 }
1068 if (report->maxfield <= field_index) {
1069 hid_err(hid, "not enough fields in %s %u\n",
1070 hid_report_names[type], id);
1071 return NULL;
1072 }
1073 if (report->field[field_index]->report_count < report_counts) {
1074 hid_err(hid, "not enough values in %s %u field %u\n",
1075 hid_report_names[type], id, field_index);
1076 return NULL;
1077 }
1078 return report;
1079 }
1080 EXPORT_SYMBOL_GPL(hid_validate_values);
1081
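/*
 * Translate a Resolution Multiplier feature value into the effective
 * multiplier, as described in the HID Usage Tables v1.12, section 4.3.1.
 * A value of 1 is returned whenever the logical range is degenerate or
 * the computed multiplier is unusable.
 */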
1082 static int hid_calculate_multiplier(struct hid_device *hid,
1083 struct hid_field *multiplier)
1084 {
1085 int m;
1086 __s32 v = *multiplier->value;
1087 __s32 lmin = multiplier->logical_minimum;
1088 __s32 lmax = multiplier->logical_maximum;
1089 __s32 pmin = multiplier->physical_minimum;
1090 __s32 pmax = multiplier->physical_maximum;
1091
1092 /*
1093 * "Because OS implementations will generally divide the control's
1094 * reported count by the Effective Resolution Multiplier, designers
1095 * should take care not to establish a potential Effective
1096 * Resolution Multiplier of zero."
1097 * HID Usage Table, v1.12, Section 4.3.1, p31
1098 */
1099 if (lmax - lmin == 0)
1100 return 1;
1101 /*
1102 * Handling the unit exponent is left as an exercise to whoever
1103 * finds a device where that exponent is not 0.
1104 */
1105 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1106 if (unlikely(multiplier->unit_exponent != 0)) {
1107 hid_warn(hid,
1108 "unsupported Resolution Multiplier unit exponent %d\n",
1109 multiplier->unit_exponent);
1110 }
1111
1112 /* There are no devices with an effective multiplier > 255 */
1113 if (unlikely(m == 0 || m > 255 || m < -255)) {
1114 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1115 m = 1;
1116 }
1117
1118 return m;
1119 }
1120
1121 static void hid_apply_multiplier_to_field(struct hid_device *hid,
1122 struct hid_field *field,
1123 struct hid_collection *multiplier_collection,
1124 int effective_multiplier)
1125 {
1126 struct hid_collection *collection;
1127 struct hid_usage *usage;
1128 int i;
1129
1130 /*
1131 * If multiplier_collection is NULL, the multiplier applies
1132 * to all fields in the report.
1133 * Otherwise, it is the Logical Collection the multiplier applies to
1134 * but our field may be in a subcollection of that collection.
1135 */
1136 for (i = 0; i < field->maxusage; i++) {
1137 usage = &field->usage[i];
1138
1139 collection = &hid->collection[usage->collection_index];
1140 while (collection->parent_idx != -1 &&
1141 collection != multiplier_collection)
1142 collection = &hid->collection[collection->parent_idx];
1143
1144 if (collection->parent_idx != -1 ||
1145 multiplier_collection == NULL)
1146 usage->resolution_multiplier = effective_multiplier;
1147
1148 }
1149 }
1150
1151 static void hid_apply_multiplier(struct hid_device *hid,
1152 struct hid_field *multiplier)
1153 {
1154 struct hid_report_enum *rep_enum;
1155 struct hid_report *rep;
1156 struct hid_field *field;
1157 struct hid_collection *multiplier_collection;
1158 int effective_multiplier;
1159 int i;
1160
1161 /*
1162 * "The Resolution Multiplier control must be contained in the same
1163 * Logical Collection as the control(s) to which it is to be applied.
1164 * If no Resolution Multiplier is defined, then the Resolution
1165 * Multiplier defaults to 1. If more than one control exists in a
1166 * Logical Collection, the Resolution Multiplier is associated with
1167 * all controls in the collection. If no Logical Collection is
1168 * defined, the Resolution Multiplier is associated with all
1169 * controls in the report."
1170 * HID Usage Table, v1.12, Section 4.3.1, p30
1171 *
1172 * Thus, search from the current collection upwards until we find a
1173 * logical collection. Then search all fields for that same parent
1174 * collection. Those are the fields the multiplier applies to.
1175 *
1176 * If we have more than one multiplier, it will overwrite the
1177 * applicable fields later.
1178 */
1179 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1180 while (multiplier_collection->parent_idx != -1 &&
1181 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1182 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1183 if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
1184 multiplier_collection = NULL;
1185
1186 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1187
1188 rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1189 list_for_each_entry(rep, &rep_enum->report_list, list) {
1190 for (i = 0; i < rep->maxfield; i++) {
1191 field = rep->field[i];
1192 hid_apply_multiplier_to_field(hid, field,
1193 multiplier_collection,
1194 effective_multiplier);
1195 }
1196 }
1197 }
1198
1199 /*
1200 * hid_setup_resolution_multiplier - set up all resolution multipliers
1201 *
1202 * @device: hid device
1203 *
1204 * Search for all Resolution Multiplier Feature Reports and apply their
1205 * value to all matching Input items. This only updates the internal struct
1206 * fields.
1207 *
1208 * The Resolution Multiplier is applied by the hardware. If the multiplier
1209 * is anything other than 1, the hardware will send pre-multiplied events
1210 * so that the same physical interaction generates an accumulated
1211 * accumulated_value = value * multiplier
1212 * This may be achieved by sending
1213 * - "value * multiplier" for each event, or
1214 * - "value" but "multiplier" times as frequently, or
1215 * - a combination of the above
1216 * The only guarantee is that the same physical interaction always generates
1217 * an accumulated 'value * multiplier'.
1218 *
1219 * This function must be called before any event processing and after
1220 * any SetRequest to the Resolution Multiplier.
1221 */
1222 void hid_setup_resolution_multiplier(struct hid_device *hid)
1223 {
1224 struct hid_report_enum *rep_enum;
1225 struct hid_report *rep;
1226 struct hid_usage *usage;
1227 int i, j;
1228
1229 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1230 list_for_each_entry(rep, &rep_enum->report_list, list) {
1231 for (i = 0; i < rep->maxfield; i++) {
1232 /* Ignore if report count is out of bounds. */
1233 if (rep->field[i]->report_count < 1)
1234 continue;
1235
1236 for (j = 0; j < rep->field[i]->maxusage; j++) {
1237 usage = &rep->field[i]->usage[j];
1238 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1239 hid_apply_multiplier(hid,
1240 rep->field[i]);
1241 }
1242 }
1243 }
1244 }
1245 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1246
1247 /**
1248 * hid_open_report - open a driver-specific device report
1249 *
1250 * @device: hid device
1251 *
1252 * Parse a report description into a hid_device structure. Reports are
1253 * enumerated, fields are attached to these reports.
1254 * 0 returned on success, otherwise nonzero error value.
1255 *
1256 * This function (or the equivalent hid_parse() macro) should only be
1257 * called from probe() in drivers, before starting the device.
1258 */
1259 int hid_open_report(struct hid_device *device)
1260 {
1261 struct hid_parser *parser;
1262 struct hid_item item;
1263 unsigned int size;
1264 const __u8 *start;
1265 const __u8 *end;
1266 const __u8 *next;
1267 int ret;
1268 int i;
1269 static int (*dispatch_type[])(struct hid_parser *parser,
1270 struct hid_item *item) = {
1271 hid_parser_main,
1272 hid_parser_global,
1273 hid_parser_local,
1274 hid_parser_reserved
1275 };
1276
1277 if (WARN_ON(device->status & HID_STAT_PARSED))
1278 return -EBUSY;
1279
1280 start = device->bpf_rdesc;
1281 if (WARN_ON(!start))
1282 return -ENODEV;
1283 size = device->bpf_rsize;
1284
1285 if (device->driver->report_fixup) {
1286 /*
1287 * device->driver->report_fixup() needs to work
1288 * on a copy of our report descriptor so it can
1289 * change it.
1290 */
1291 __u8 *buf = kmemdup(start, size, GFP_KERNEL);
1292
1293 if (buf == NULL)
1294 return -ENOMEM;
1295
1296 start = device->driver->report_fixup(device, buf, &size);
1297
1298 /*
1299 * The second kmemdup is required in case report_fixup() returns
1300 * a static read-only memory, but we have no idea if that memory
1301 * needs to be cleaned up or not at the end.
1302 */
1303 start = kmemdup(start, size, GFP_KERNEL);
1304 kfree(buf);
1305 if (start == NULL)
1306 return -ENOMEM;
1307 }
1308
1309 device->rdesc = start;
1310 device->rsize = size;
1311
1312 parser = vzalloc(sizeof(struct hid_parser));
1313 if (!parser) {
1314 ret = -ENOMEM;
1315 goto alloc_err;
1316 }
1317
1318 parser->device = device;
1319
1320 end = start + size;
1321
1322 device->collection = kzalloc_objs(struct hid_collection,
1323 HID_DEFAULT_NUM_COLLECTIONS,
1324 GFP_KERNEL);
1325 if (!device->collection) {
1326 ret = -ENOMEM;
1327 goto err;
1328 }
1329 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1330 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1331 device->collection[i].parent_idx = -1;
1332
1333 ret = -EINVAL;
1334 while ((next = fetch_item(start, end, &item)) != NULL) {
1335 start = next;
1336
1337 if (item.format != HID_ITEM_FORMAT_SHORT) {
1338 hid_err(device, "unexpected long global item\n");
1339 goto err;
1340 }
1341
1342 if (dispatch_type[item.type](parser, &item)) {
1343 hid_err(device, "item %u %u %u %u parsing failed\n",
1344 item.format, (unsigned)item.size,
1345 (unsigned)item.type, (unsigned)item.tag);
1346 goto err;
1347 }
1348
1349 if (start == end) {
1350 if (parser->collection_stack_ptr) {
1351 hid_err(device, "unbalanced collection at end of report description\n");
1352 goto err;
1353 }
1354 if (parser->local.delimiter_depth) {
1355 hid_err(device, "unbalanced delimiter at end of report description\n");
1356 goto err;
1357 }
1358
1359 /*
1360 * fetch initial values in case the device's
1361 * default multiplier isn't the recommended 1
1362 */
1363 hid_setup_resolution_multiplier(device);
1364
1365 kfree(parser->collection_stack);
1366 vfree(parser);
1367 device->status |= HID_STAT_PARSED;
1368
1369 return 0;
1370 }
1371 }
1372
1373 hid_err(device, "item fetching failed at offset %u/%u\n",
1374 size - (unsigned int)(end - start), size);
1375 err:
1376 kfree(parser->collection_stack);
1377 alloc_err:
1378 vfree(parser);
1379 hid_close_report(device);
1380 return ret;
1381 }
1382 EXPORT_SYMBOL_GPL(hid_open_report);
1383
1384 /*
1385 * Extract/implement a data field from/to a little endian report (bit array).
1386 *
1387 * Code sort-of follows HID spec:
1388 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1389 *
1390 * While the USB HID spec allows unlimited length bit fields in "report
1391 * descriptors", most devices never use more than 16 bits.
1392 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1393 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1394 */
1395
1396 static u32 __extract(u8 *report, unsigned offset, int n)
1397 {
1398 unsigned int idx = offset / 8;
1399 unsigned int bit_nr = 0;
1400 unsigned int bit_shift = offset % 8;
1401 int bits_to_copy = 8 - bit_shift;
1402 u32 value = 0;
1403 u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1404
1405 while (n > 0) {
1406 value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1407 n -= bits_to_copy;
1408 bit_nr += bits_to_copy;
1409 bits_to_copy = 8;
1410 bit_shift = 0;
1411 idx++;
1412 }
1413
1414 return value & mask;
1415 }
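/*
 * Worked example: with report bytes { 0xab, 0xcd }, __extract(report, 4, 8)
 * gathers bits 4..11 of the little endian bit stream and returns 0xda.
 */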
1416
1417 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1418 unsigned offset, unsigned n)
1419 {
1420 if (n > 32) {
1421 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1422 __func__, n, current->comm);
1423 n = 32;
1424 }
1425
1426 return __extract(report, offset, n);
1427 }
1428 EXPORT_SYMBOL_GPL(hid_field_extract);
1429
1430 /*
1431 * "implement" : set bits in a little endian bit stream.
1432 * Same concepts as "extract" (see comments above).
1433 * The data mangled in the bit stream remains in little endian
1434 * order the whole time. It makes more sense to talk about
1435 * endianness of register values by considering a register
1436 * a "cached" copy of the little endian bit stream.
1437 */
1438
1439 static void __implement(u8 *report, unsigned offset, int n, u32 value)
1440 {
1441 unsigned int idx = offset / 8;
1442 unsigned int bit_shift = offset % 8;
1443 int bits_to_set = 8 - bit_shift;
1444
1445 while (n - bits_to_set >= 0) {
1446 report[idx] &= ~(0xff << bit_shift);
1447 report[idx] |= value << bit_shift;
1448 value >>= bits_to_set;
1449 n -= bits_to_set;
1450 bits_to_set = 8;
1451 bit_shift = 0;
1452 idx++;
1453 }
1454
1455 /* last nibble */
1456 if (n) {
1457 u8 bit_mask = ((1U << n) - 1);
1458 report[idx] &= ~(bit_mask << bit_shift);
1459 report[idx] |= value << bit_shift;
1460 }
1461 }
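/*
 * Worked example (the inverse of the __extract() example above): writing
 * value 0xda at offset 4 with n = 8 into a zeroed two-byte buffer leaves
 * { 0xa0, 0x0d }.
 */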
1462
1463 static void implement(const struct hid_device *hid, u8 *report,
1464 unsigned offset, unsigned n, u32 value)
1465 {
1466 if (unlikely(n > 32)) {
1467 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1468 __func__, n, current->comm);
1469 n = 32;
1470 } else if (n < 32) {
1471 u32 m = (1U << n) - 1;
1472
1473 if (unlikely(value > m)) {
1474 hid_warn(hid,
1475 "%s() called with too large value %d (n: %d)! (%s)\n",
1476 __func__, value, n, current->comm);
1477 value &= m;
1478 }
1479 }
1480
1481 __implement(report, offset, n, value);
1482 }
1483
1484 /*
1485 * Search an array for a value.
1486 */
1487
1488 static int search(__s32 *array, __s32 value, unsigned n)
1489 {
1490 while (n--) {
1491 if (*array++ == value)
1492 return 0;
1493 }
1494 return -1;
1495 }
1496
1497 /**
1498 * hid_match_report - check if driver's raw_event should be called
1499 *
1500 * @hid: hid device
1501 * @report: hid report to match against
1502 *
1503 * compare hid->driver->report_table->report_type to report->type
1504 */
1505 static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1506 {
1507 const struct hid_report_id *id = hid->driver->report_table;
1508
1509 if (!id) /* NULL means all */
1510 return 1;
1511
1512 for (; id->report_type != HID_TERMINATOR; id++)
1513 if (id->report_type == HID_ANY_ID ||
1514 id->report_type == report->type)
1515 return 1;
1516 return 0;
1517 }
1518
1519 /**
1520 * hid_match_usage - check if driver's event should be called
1521 *
1522 * @hid: hid device
1523 * @usage: usage to match against
1524 *
1525 * compare hid->driver->usage_table->usage_{type,code} to
1526 * usage->usage_{type,code}
1527 */
1528 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1529 {
1530 const struct hid_usage_id *id = hid->driver->usage_table;
1531
1532 if (!id) /* NULL means all */
1533 return 1;
1534
1535 for (; id->usage_type != HID_ANY_ID - 1; id++)
1536 if ((id->usage_hid == HID_ANY_ID ||
1537 id->usage_hid == usage->hid) &&
1538 (id->usage_type == HID_ANY_ID ||
1539 id->usage_type == usage->type) &&
1540 (id->usage_code == HID_ANY_ID ||
1541 id->usage_code == usage->code))
1542 return 1;
1543 return 0;
1544 }
1545
1546 static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1547 struct hid_usage *usage, __s32 value, int interrupt)
1548 {
1549 struct hid_driver *hdrv = hid->driver;
1550 int ret;
1551
1552 if (!list_empty(&hid->debug_list))
1553 hid_dump_input(hid, usage, value);
1554
1555 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1556 ret = hdrv->event(hid, field, usage, value);
1557 if (ret != 0) {
1558 if (ret < 0)
1559 hid_err(hid, "%s's event failed with %d\n",
1560 hdrv->name, ret);
1561 return;
1562 }
1563 }
1564
1565 if (hid->claimed & HID_CLAIMED_INPUT)
1566 hidinput_hid_event(hid, field, usage, value);
1567 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1568 hid->hiddev_hid_event(hid, field, usage, value);
1569 }
1570
1571 /*
1572 * Checks if the given value is valid within this field
1573 */
1574 static inline int hid_array_value_is_valid(struct hid_field *field,
1575 __s32 value)
1576 {
1577 __s32 min = field->logical_minimum;
1578
1579 /*
1580 * Value needs to be between logical min and max, and
1581 * (value - min) is used as an index in the usage array.
1582 * This array is of size field->maxusage
1583 */
1584 return value >= min &&
1585 value <= field->logical_maximum &&
1586 value - min < field->maxusage;
1587 }
1588
1589 /*
1590 * Fetch the field from the data. The field content is stored for next
1591 * report processing (we do differential reporting to the layer).
1592 */
1593 static void hid_input_fetch_field(struct hid_device *hid,
1594 struct hid_field *field,
1595 __u8 *data)
1596 {
1597 unsigned n;
1598 unsigned count = field->report_count;
1599 unsigned offset = field->report_offset;
1600 unsigned size = field->report_size;
1601 __s32 min = field->logical_minimum;
1602 __s32 *value;
1603
1604 value = field->new_value;
1605 memset(value, 0, count * sizeof(__s32));
1606 field->ignored = false;
1607
1608 for (n = 0; n < count; n++) {
1609
1610 value[n] = min < 0 ?
1611 snto32(hid_field_extract(hid, data, offset + n * size,
1612 size), size) :
1613 hid_field_extract(hid, data, offset + n * size, size);
1614
1615 /* Ignore report if ErrorRollOver */
1616 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1617 hid_array_value_is_valid(field, value[n]) &&
1618 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1619 field->ignored = true;
1620 return;
1621 }
1622 }
1623 }
1624
1625 /*
1626 * Process a received variable field.
1627 */
1628
1629 static void hid_input_var_field(struct hid_device *hid,
1630 struct hid_field *field,
1631 int interrupt)
1632 {
1633 unsigned int count = field->report_count;
1634 __s32 *value = field->new_value;
1635 unsigned int n;
1636
1637 for (n = 0; n < count; n++)
1638 hid_process_event(hid,
1639 field,
1640 &field->usage[n],
1641 value[n],
1642 interrupt);
1643
1644 memcpy(field->value, value, count * sizeof(__s32));
1645 }
1646
1647 /*
1648 * Process a received array field. The field content is stored for
1649 * next report processing (we do differential reporting to the layer).
1650 */
1651
1652 static void hid_input_array_field(struct hid_device *hid,
1653 struct hid_field *field,
1654 int interrupt)
1655 {
1656 unsigned int n;
1657 unsigned int count = field->report_count;
1658 __s32 min = field->logical_minimum;
1659 __s32 *value;
1660
1661 value = field->new_value;
1662
1663 /* ErrorRollOver */
1664 if (field->ignored)
1665 return;
1666
1667 for (n = 0; n < count; n++) {
1668 if (hid_array_value_is_valid(field, field->value[n]) &&
1669 search(value, field->value[n], count))
1670 hid_process_event(hid,
1671 field,
1672 &field->usage[field->value[n] - min],
1673 0,
1674 interrupt);
1675
1676 if (hid_array_value_is_valid(field, value[n]) &&
1677 search(field->value, value[n], count))
1678 hid_process_event(hid,
1679 field,
1680 &field->usage[value[n] - min],
1681 1,
1682 interrupt);
1683 }
1684
1685 memcpy(field->value, value, count * sizeof(__s32));
1686 }
1687
1688 /*
1689 * Analyse a received report, and fetch the data from it. The field
1690 * content is stored for next report processing (we do differential
1691 * reporting to the layer).
1692 */
1693 static void hid_process_report(struct hid_device *hid,
1694 struct hid_report *report,
1695 __u8 *data,
1696 int interrupt)
1697 {
1698 unsigned int a;
1699 struct hid_field_entry *entry;
1700 struct hid_field *field;
1701
1702 /* first retrieve all incoming values in data */
1703 for (a = 0; a < report->maxfield; a++)
1704 hid_input_fetch_field(hid, report->field[a], data);
1705
1706 if (!list_empty(&report->field_entry_list)) {
1707 /* INPUT_REPORT, we have a priority list of fields */
1708 list_for_each_entry(entry,
1709 &report->field_entry_list,
1710 list) {
1711 field = entry->field;
1712
1713 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1714 hid_process_event(hid,
1715 field,
1716 &field->usage[entry->index],
1717 field->new_value[entry->index],
1718 interrupt);
1719 else
1720 hid_input_array_field(hid, field, interrupt);
1721 }
1722
1723 /* we need to do the memcpy at the end for var items */
1724 for (a = 0; a < report->maxfield; a++) {
1725 field = report->field[a];
1726
1727 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1728 memcpy(field->value, field->new_value,
1729 field->report_count * sizeof(__s32));
1730 }
1731 } else {
1732 /* FEATURE_REPORT, regular processing */
1733 for (a = 0; a < report->maxfield; a++) {
1734 field = report->field[a];
1735
1736 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1737 hid_input_var_field(hid, field, interrupt);
1738 else
1739 hid_input_array_field(hid, field, interrupt);
1740 }
1741 }
1742 }
1743
1744 /*
1745 * Insert a given usage_index in a field in the list
1746 * of processed usages in the report.
1747 *
1748 * The elements of lower priority score are processed
1749 * first.
1750 */
1751 static void __hid_insert_field_entry(struct hid_device *hid,
1752 struct hid_report *report,
1753 struct hid_field_entry *entry,
1754 struct hid_field *field,
1755 unsigned int usage_index)
1756 {
1757 struct hid_field_entry *next;
1758
1759 entry->field = field;
1760 entry->index = usage_index;
1761 entry->priority = field->usages_priorities[usage_index];
1762
1763 /* insert the element at the correct position */
1764 list_for_each_entry(next,
1765 &report->field_entry_list,
1766 list) {
1767 /*
1768 * the priority of our element is strictly higher
1769 * than the next one, insert it before
1770 */
1771 if (entry->priority > next->priority) {
1772 list_add_tail(&entry->list, &next->list);
1773 return;
1774 }
1775 }
1776
1777 /* lowest priority score: insert at the end */
1778 list_add_tail(&entry->list, &report->field_entry_list);
1779 }
1780
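/*
 * Build the priority-ordered list of field entries for one input report.
 * hid_process_report() later walks this list so that usages are handled
 * according to their priorities.
 */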
1781 static void hid_report_process_ordering(struct hid_device *hid,
1782 struct hid_report *report)
1783 {
1784 struct hid_field *field;
1785 struct hid_field_entry *entries;
1786 unsigned int a, u, usages;
1787 unsigned int count = 0;
1788
1789 /* count the number of individual fields in the report */
1790 for (a = 0; a < report->maxfield; a++) {
1791 field = report->field[a];
1792
1793 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1794 count += field->report_count;
1795 else
1796 count++;
1797 }
1798
1799 /* allocate the memory to process the fields */
1800 entries = kzalloc_objs(*entries, count, GFP_KERNEL);
1801 if (!entries)
1802 return;
1803
1804 report->field_entries = entries;
1805
1806 /*
1807 * walk through all fields in the report and
1808 * store them by priority order in report->field_entry_list
1809 *
1810 * - Var elements are individualized (field + usage_index)
1811 * - Arrays are taken as one, we cannot choose an order for them
1812 */
1813 usages = 0;
1814 for (a = 0; a < report->maxfield; a++) {
1815 field = report->field[a];
1816
1817 if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1818 for (u = 0; u < field->report_count; u++) {
1819 __hid_insert_field_entry(hid, report,
1820 &entries[usages],
1821 field, u);
1822 usages++;
1823 }
1824 } else {
1825 __hid_insert_field_entry(hid, report, &entries[usages],
1826 field, 0);
1827 usages++;
1828 }
1829 }
1830 }
1831
1832 static void hid_process_ordering(struct hid_device *hid)
1833 {
1834 struct hid_report *report;
1835 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1836
1837 list_for_each_entry(report, &report_enum->report_list, list)
1838 hid_report_process_ordering(hid, report);
1839 }
1840
1841 /*
1842 * Output the field into the report.
1843 */
1844
1845 static void hid_output_field(const struct hid_device *hid,
1846 struct hid_field *field, __u8 *data)
1847 {
1848 unsigned count = field->report_count;
1849 unsigned offset = field->report_offset;
1850 unsigned size = field->report_size;
1851 unsigned n;
1852
1853 for (n = 0; n < count; n++) {
1854 if (field->logical_minimum < 0) /* signed values */
1855 implement(hid, data, offset + n * size, size,
1856 s32ton(field->value[n], size));
1857 else /* unsigned values */
1858 implement(hid, data, offset + n * size, size,
1859 field->value[n]);
1860 }
1861 }
1862
1863 /*
1864 * Compute the size of a report.
1865 */
1866 static size_t hid_compute_report_size(struct hid_report *report)
1867 {
1868 if (report->size)
1869 return ((report->size - 1) >> 3) + 1;
1870
1871 return 0;
1872 }
1873
1874 /*
1875 * Serialize a report into 'data'. 'data' has to be allocated using
1876 * hid_alloc_report_buf() so that it has the proper size.
1877 */
1878
1879 void hid_output_report(struct hid_report *report, __u8 *data)
1880 {
1881 unsigned n;
1882
1883 if (report->id > 0)
1884 *data++ = report->id;
1885
1886 memset(data, 0, hid_compute_report_size(report));
1887 for (n = 0; n < report->maxfield; n++)
1888 hid_output_field(report->device, report->field[n], data);
1889 }
1890 EXPORT_SYMBOL_GPL(hid_output_report);
1891
1892 /*
1893 * Allocator for buffer that is going to be passed to hid_output_report()
1894 */
1895 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1896 {
1897 /*
1898 * 7 extra bytes are necessary because implement() works on
1899 * 8 byte chunks and may write past the nominal report length.
1900 * 1 extra byte is needed for the report ID when it is 0 (not used), so
1901 * we can reserve that extra byte in the first position of the buffer
1902 * when sending it to .raw_request()
1903 */
1904
1905 u32 len = hid_report_len(report) + 7 + (report->id == 0);
1906
1907 return kzalloc(len, flags);
1908 }
1909 EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
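
/*
 * Illustrative sketch only (not called from the core): a typical driver-side
 * sequence pairing hid_alloc_report_buf() with hid_output_report() and a
 * transport send. The function name and the choice of hid_hw_output_report()
 * as the transport call are assumptions for the example.
 */
static int __maybe_unused example_send_report(struct hid_device *hdev,
					      struct hid_report *report)
{
	u8 *buf;
	int ret;

	buf = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* serialize report->field[]->value[] into the buffer */
	hid_output_report(report, buf);

	/* hand the raw bytes to the transport driver */
	ret = hid_hw_output_report(hdev, buf, hid_report_len(report));

	kfree(buf);
	return ret < 0 ? ret : 0;
}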
1910
1911 /*
1912 * Set a field value. The report this field belongs to still has to be
1913 * built and transferred to the device for the value to actually take
1914 * effect there.
1915 */
1916
1917 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1918 {
1919 unsigned size;
1920
1921 if (!field)
1922 return -1;
1923
1924 size = field->report_size;
1925
1926 hid_dump_input(field->report->device, field->usage + offset, value);
1927
1928 if (offset >= field->report_count) {
1929 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1930 offset, field->report_count);
1931 return -1;
1932 }
1933 if (field->logical_minimum < 0) {
1934 if (value != snto32(s32ton(value, size), size)) {
1935 hid_err(field->report->device, "value %d is out of range\n", value);
1936 return -1;
1937 }
1938 }
1939 field->value[offset] = value;
1940 return 0;
1941 }
1942 EXPORT_SYMBOL_GPL(hid_set_field);
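
/*
 * Illustrative sketch only: setting a field value and pushing it to the
 * device. The report id (0x02) and the value are hypothetical; a real driver
 * would use the ids from its own report descriptor.
 */
static void __maybe_unused example_set_value(struct hid_device *hdev, __s32 value)
{
	struct hid_report *report;

	report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[0x02];
	if (!report || report->maxfield < 1)
		return;

	if (hid_set_field(report->field[0], 0, value))
		return;

	/* send the report; falls back to __hid_request() internally */
	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}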
1943
1944 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1945 unsigned int application, unsigned int usage)
1946 {
1947 struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1948 struct hid_report *report;
1949 int i, j;
1950
1951 list_for_each_entry(report, report_list, list) {
1952 if (report->application != application)
1953 continue;
1954
1955 for (i = 0; i < report->maxfield; i++) {
1956 struct hid_field *field = report->field[i];
1957
1958 for (j = 0; j < field->maxusage; j++) {
1959 if (field->usage[j].hid == usage)
1960 return field;
1961 }
1962 }
1963 }
1964
1965 return NULL;
1966 }
1967 EXPORT_SYMBOL_GPL(hid_find_field);
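
/*
 * Illustrative sketch only: locating a field by application/usage with
 * hid_find_field() and updating it. The vendor usage code 0xff000001 is a
 * placeholder for the example; HID_GD_KEYBOARD is the generic-desktop
 * keyboard application usage.
 */
static int __maybe_unused example_write_usage(struct hid_device *hdev, __s32 value)
{
	struct hid_field *field;

	field = hid_find_field(hdev, HID_OUTPUT_REPORT, HID_GD_KEYBOARD, 0xff000001);
	if (!field)
		return -ENOENT;

	if (hid_set_field(field, 0, value))
		return -EINVAL;

	hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT);
	return 0;
}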
1968
1969 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1970 const u8 *data)
1971 {
1972 struct hid_report *report;
1973 unsigned int n = 0; /* Normally report number is 0 */
1974
1975 /* Device uses numbered reports, data[0] is report number */
1976 if (report_enum->numbered)
1977 n = *data;
1978
1979 report = report_enum->report_id_hash[n];
1980 if (report == NULL)
1981 dbg_hid("undefined report_id %u received\n", n);
1982
1983 return report;
1984 }
1985
1986 /*
1987 * Implement a generic .request() callback, using .raw_request()
1988 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1989 */
1990 int __hid_request(struct hid_device *hid, struct hid_report *report,
1991 enum hid_class_request reqtype)
1992 {
1993 char *buf, *data_buf;
1994 int ret;
1995 u32 len;
1996
1997 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1998 if (!buf)
1999 return -ENOMEM;
2000
2001 data_buf = buf;
2002 len = hid_report_len(report);
2003
2004 if (report->id == 0) {
2005 /* reserve the first byte for the report ID */
2006 data_buf++;
2007 len++;
2008 }
2009
2010 if (reqtype == HID_REQ_SET_REPORT)
2011 hid_output_report(report, data_buf);
2012
2013 ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
2014 if (ret < 0) {
2015 dbg_hid("unable to complete request: %d\n", ret);
2016 goto out;
2017 }
2018
2019 if (reqtype == HID_REQ_GET_REPORT)
2020 hid_input_report(hid, report->type, buf, ret, 0);
2021
2022 ret = 0;
2023
2024 out:
2025 kfree(buf);
2026 return ret;
2027 }
2028 EXPORT_SYMBOL_GPL(__hid_request);
2029
2030 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2031 int interrupt)
2032 {
2033 struct hid_report_enum *report_enum = hid->report_enum + type;
2034 struct hid_report *report;
2035 struct hid_driver *hdrv;
2036 int max_buffer_size = HID_MAX_BUFFER_SIZE;
2037 u32 rsize, csize = size;
2038 u8 *cdata = data;
2039 int ret = 0;
2040
2041 report = hid_get_report(report_enum, data);
2042 if (!report)
2043 goto out;
2044
2045 if (report_enum->numbered) {
2046 cdata++;
2047 csize--;
2048 }
2049
2050 rsize = hid_compute_report_size(report);
2051
2052 if (hid->ll_driver->max_buffer_size)
2053 max_buffer_size = hid->ll_driver->max_buffer_size;
2054
2055 if (report_enum->numbered && rsize >= max_buffer_size)
2056 rsize = max_buffer_size - 1;
2057 else if (rsize > max_buffer_size)
2058 rsize = max_buffer_size;
2059
2060 if (csize < rsize) {
2061 dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2062 csize, rsize);
2063 memset(cdata + csize, 0, rsize - csize);
2064 }
2065
2066 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2067 hid->hiddev_report_event(hid, report);
2068 if (hid->claimed & HID_CLAIMED_HIDRAW) {
2069 ret = hidraw_report_event(hid, data, size);
2070 if (ret)
2071 goto out;
2072 }
2073
2074 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2075 hid_process_report(hid, report, cdata, interrupt);
2076 hdrv = hid->driver;
2077 if (hdrv && hdrv->report)
2078 hdrv->report(hid, report);
2079 }
2080
2081 if (hid->claimed & HID_CLAIMED_INPUT)
2082 hidinput_report_event(hid, report);
2083 out:
2084 return ret;
2085 }
2086 EXPORT_SYMBOL_GPL(hid_report_raw_event);
2087
2088
2089 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2090 u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2091 bool lock_already_taken)
2092 {
2093 struct hid_report_enum *report_enum;
2094 struct hid_driver *hdrv;
2095 struct hid_report *report;
2096 int ret = 0;
2097
2098 if (!hid)
2099 return -ENODEV;
2100
2101 ret = down_trylock(&hid->driver_input_lock);
2102 if (lock_already_taken && !ret) {
2103 up(&hid->driver_input_lock);
2104 return -EINVAL;
2105 } else if (!lock_already_taken && ret) {
2106 return -EBUSY;
2107 }
2108
2109 if (!hid->driver) {
2110 ret = -ENODEV;
2111 goto unlock;
2112 }
2113 report_enum = hid->report_enum + type;
2114 hdrv = hid->driver;
2115
2116 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2117 if (IS_ERR(data)) {
2118 ret = PTR_ERR(data);
2119 goto unlock;
2120 }
2121
2122 if (!size) {
2123 dbg_hid("empty report\n");
2124 ret = -1;
2125 goto unlock;
2126 }
2127
2128 /* Avoid unnecessary overhead if debugfs is disabled */
2129 if (!list_empty(&hid->debug_list))
2130 hid_dump_report(hid, type, data, size);
2131
2132 report = hid_get_report(report_enum, data);
2133
2134 if (!report) {
2135 ret = -1;
2136 goto unlock;
2137 }
2138
2139 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2140 ret = hdrv->raw_event(hid, report, data, size);
2141 if (ret < 0)
2142 goto unlock;
2143 }
2144
2145 ret = hid_report_raw_event(hid, type, data, size, interrupt);
2146
2147 unlock:
2148 if (!lock_already_taken)
2149 up(&hid->driver_input_lock);
2150 return ret;
2151 }
2152
2153 /**
2154 * hid_input_report - report data from lower layer (usb, bt...)
2155 *
2156 * @hid: hid device
2157 * @type: HID report type (HID_*_REPORT)
2158 * @data: report contents
2159 * @size: size of data parameter
2160 * @interrupt: distinguish between interrupt and control transfers
2161 *
2162 * This is the data entry point for the lower layers.
2163 */
2164 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2165 int interrupt)
2166 {
2167 return __hid_input_report(hid, type, data, size, interrupt, 0,
2168 false, /* from_bpf */
2169 false /* lock_already_taken */);
2170 }
2171 EXPORT_SYMBOL_GPL(hid_input_report);
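
/*
 * Illustrative sketch only: how a transport (ll) driver would feed data it
 * received from the hardware into the core. The buffer and length come from
 * the transport's own completion handler; interrupt=1 marks an asynchronous
 * (interrupt) transfer. The function name is hypothetical.
 */
static void __maybe_unused example_ll_irq(struct hid_device *hid, u8 *buf, u32 len)
{
	if (hid_input_report(hid, HID_INPUT_REPORT, buf, len, 1))
		dbg_hid("report dropped or device busy\n");
}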
2172
2173 bool hid_match_one_id(const struct hid_device *hdev,
2174 const struct hid_device_id *id)
2175 {
2176 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2177 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2178 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2179 (id->product == HID_ANY_ID || id->product == hdev->product);
2180 }
2181
2182 const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2183 const struct hid_device_id *id)
2184 {
2185 for (; id->bus; id++)
2186 if (hid_match_one_id(hdev, id))
2187 return id;
2188
2189 return NULL;
2190 }
2191 EXPORT_SYMBOL_GPL(hid_match_id);
2192
2193 static const struct hid_device_id hid_hiddev_list[] = {
2194 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2195 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2196 { }
2197 };
2198
2199 static bool hid_hiddev(struct hid_device *hdev)
2200 {
2201 return !!hid_match_id(hdev, hid_hiddev_list);
2202 }
2203
2204
2205 static ssize_t
2206 report_descriptor_read(struct file *filp, struct kobject *kobj,
2207 const struct bin_attribute *attr,
2208 char *buf, loff_t off, size_t count)
2209 {
2210 struct device *dev = kobj_to_dev(kobj);
2211 struct hid_device *hdev = to_hid_device(dev);
2212
2213 if (off >= hdev->rsize)
2214 return 0;
2215
2216 if (off + count > hdev->rsize)
2217 count = hdev->rsize - off;
2218
2219 memcpy(buf, hdev->rdesc + off, count);
2220
2221 return count;
2222 }
2223
2224 static ssize_t
2225 country_show(struct device *dev, struct device_attribute *attr,
2226 char *buf)
2227 {
2228 struct hid_device *hdev = to_hid_device(dev);
2229
2230 return sprintf(buf, "%02x\n", hdev->country & 0xff);
2231 }
2232
2233 static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);
2234
2235 static const DEVICE_ATTR_RO(country);
2236
2237 int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2238 {
2239 static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2240 "Joystick", "Gamepad", "Keyboard", "Keypad",
2241 "Multi-Axis Controller"
2242 };
2243 const char *type, *bus;
2244 char buf[64] = "";
2245 unsigned int i;
2246 int len;
2247 int ret;
2248
2249 ret = hid_bpf_connect_device(hdev);
2250 if (ret)
2251 return ret;
2252
2253 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2254 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2255 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2256 connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2257 if (hdev->bus != BUS_USB)
2258 connect_mask &= ~HID_CONNECT_HIDDEV;
2259 if (hid_hiddev(hdev))
2260 connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2261
2262 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2263 connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2264 hdev->claimed |= HID_CLAIMED_INPUT;
2265
2266 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2267 !hdev->hiddev_connect(hdev,
2268 connect_mask & HID_CONNECT_HIDDEV_FORCE))
2269 hdev->claimed |= HID_CLAIMED_HIDDEV;
2270 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2271 hdev->claimed |= HID_CLAIMED_HIDRAW;
2272
2273 if (connect_mask & HID_CONNECT_DRIVER)
2274 hdev->claimed |= HID_CLAIMED_DRIVER;
2275
2276 /* Drivers with the ->raw_event callback set are not required to connect
2277 * to any other listener. */
2278 if (!hdev->claimed && !hdev->driver->raw_event) {
2279 hid_err(hdev, "device has no listeners, quitting\n");
2280 return -ENODEV;
2281 }
2282
2283 hid_process_ordering(hdev);
2284
2285 if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2286 (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2287 hdev->ff_init(hdev);
2288
2289 len = 0;
2290 if (hdev->claimed & HID_CLAIMED_INPUT)
2291 len += sprintf(buf + len, "input");
2292 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2293 len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2294 ((struct hiddev *)hdev->hiddev)->minor);
2295 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2296 len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2297 ((struct hidraw *)hdev->hidraw)->minor);
2298
2299 type = "Device";
2300 for (i = 0; i < hdev->maxcollection; i++) {
2301 struct hid_collection *col = &hdev->collection[i];
2302 if (col->type == HID_COLLECTION_APPLICATION &&
2303 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2304 (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2305 type = types[col->usage & 0xffff];
2306 break;
2307 }
2308 }
2309
2310 switch (hdev->bus) {
2311 case BUS_USB:
2312 bus = "USB";
2313 break;
2314 case BUS_BLUETOOTH:
2315 bus = "BLUETOOTH";
2316 break;
2317 case BUS_I2C:
2318 bus = "I2C";
2319 break;
2320 case BUS_SDW:
2321 bus = "SOUNDWIRE";
2322 break;
2323 case BUS_VIRTUAL:
2324 bus = "VIRTUAL";
2325 break;
2326 case BUS_INTEL_ISHTP:
2327 case BUS_AMD_SFH:
2328 bus = "SENSOR HUB";
2329 break;
2330 default:
2331 bus = "<UNKNOWN>";
2332 }
2333
2334 ret = device_create_file(&hdev->dev, &dev_attr_country);
2335 if (ret)
2336 hid_warn(hdev,
2337 "can't create sysfs country code attribute err: %d\n", ret);
2338
2339 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2340 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2341 type, hdev->name, hdev->phys);
2342
2343 return 0;
2344 }
2345 EXPORT_SYMBOL_GPL(hid_connect);
2346
2347 void hid_disconnect(struct hid_device *hdev)
2348 {
2349 device_remove_file(&hdev->dev, &dev_attr_country);
2350 if (hdev->claimed & HID_CLAIMED_INPUT)
2351 hidinput_disconnect(hdev);
2352 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2353 hdev->hiddev_disconnect(hdev);
2354 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2355 hidraw_disconnect(hdev);
2356 hdev->claimed = 0;
2357
2358 hid_bpf_disconnect_device(hdev);
2359 }
2360 EXPORT_SYMBOL_GPL(hid_disconnect);
2361
2362 /**
2363 * hid_hw_start - start underlying HW
2364 * @hdev: hid device
2365 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2366 *
2367 * Call this in probe function *after* hid_parse. This will setup HW
2368 * buffers and start the device (if not deferred to device open).
2369 * hid_hw_stop must be called if this was successful.
2370 */
2371 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2372 {
2373 int error;
2374
2375 error = hdev->ll_driver->start(hdev);
2376 if (error)
2377 return error;
2378
2379 if (connect_mask) {
2380 error = hid_connect(hdev, connect_mask);
2381 if (error) {
2382 hdev->ll_driver->stop(hdev);
2383 return error;
2384 }
2385 }
2386
2387 return 0;
2388 }
2389 EXPORT_SYMBOL_GPL(hid_hw_start);
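
/*
 * Illustrative sketch only: the canonical probe sequence a HID driver uses
 * around hid_hw_start(). The function name is hypothetical; hid_parse() is
 * the inline wrapper around hid_open_report().
 */
static int __maybe_unused example_driver_probe(struct hid_device *hdev,
					       const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		hid_err(hdev, "hw start failed\n");

	return ret;
}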
2390
2391 /**
2392 * hid_hw_stop - stop underlying HW
2393 * @hdev: hid device
2394 *
2395 * This is usually called from remove function or from probe when something
2396 * failed and hid_hw_start was called already.
2397 */
2398 void hid_hw_stop(struct hid_device *hdev)
2399 {
2400 hid_disconnect(hdev);
2401 hdev->ll_driver->stop(hdev);
2402 }
2403 EXPORT_SYMBOL_GPL(hid_hw_stop);
2404
2405 /**
2406 * hid_hw_open - signal underlying HW to start delivering events
2407 * @hdev: hid device
2408 *
2409 * Tell underlying HW to start delivering events from the device.
2410 * This function should be called sometime after successful call
2411 * to hid_hw_start().
2412 */
2413 int hid_hw_open(struct hid_device *hdev)
2414 {
2415 int ret;
2416
2417 ret = mutex_lock_killable(&hdev->ll_open_lock);
2418 if (ret)
2419 return ret;
2420
2421 if (!hdev->ll_open_count++) {
2422 ret = hdev->ll_driver->open(hdev);
2423 if (ret)
2424 hdev->ll_open_count--;
2425
2426 if (hdev->driver->on_hid_hw_open)
2427 hdev->driver->on_hid_hw_open(hdev);
2428 }
2429
2430 mutex_unlock(&hdev->ll_open_lock);
2431 return ret;
2432 }
2433 EXPORT_SYMBOL_GPL(hid_hw_open);
2434
2435 /**
2436 * hid_hw_close - signal underlying HW to stop delivering events
2437 *
2438 * @hdev: hid device
2439 *
2440 * This function indicates that we are not interested in the events
2441 * from this device anymore. Delivery of events may or may not stop,
2442 * depending on the number of users still outstanding.
2443 */
2444 void hid_hw_close(struct hid_device *hdev)
2445 {
2446 mutex_lock(&hdev->ll_open_lock);
2447 if (!--hdev->ll_open_count) {
2448 hdev->ll_driver->close(hdev);
2449
2450 if (hdev->driver->on_hid_hw_close)
2451 hdev->driver->on_hid_hw_close(hdev);
2452 }
2453 mutex_unlock(&hdev->ll_open_lock);
2454 }
2455 EXPORT_SYMBOL_GPL(hid_hw_close);
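
/*
 * Illustrative sketch only: hid_hw_open()/hid_hw_close() are reference
 * counted and are typically paired in the consumer's open/close paths,
 * for example an input_dev's handlers (hidinput does exactly this).
 * The function names are hypothetical.
 */
static int __maybe_unused example_input_open(struct input_dev *dev)
{
	struct hid_device *hid = input_get_drvdata(dev);

	return hid_hw_open(hid);
}

static void __maybe_unused example_input_close(struct input_dev *dev)
{
	struct hid_device *hid = input_get_drvdata(dev);

	hid_hw_close(hid);
}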
2456
2457 /**
2458 * hid_hw_request - send report request to device
2459 *
2460 * @hdev: hid device
2461 * @report: report to send
2462 * @reqtype: hid request type
2463 */
2464 void hid_hw_request(struct hid_device *hdev,
2465 struct hid_report *report, enum hid_class_request reqtype)
2466 {
2467 if (hdev->ll_driver->request)
2468 return hdev->ll_driver->request(hdev, report, reqtype);
2469
2470 __hid_request(hdev, report, reqtype);
2471 }
2472 EXPORT_SYMBOL_GPL(hid_hw_request);
2473
2474 int __hid_hw_raw_request(struct hid_device *hdev,
2475 unsigned char reportnum, __u8 *buf,
2476 size_t len, enum hid_report_type rtype,
2477 enum hid_class_request reqtype,
2478 u64 source, bool from_bpf)
2479 {
2480 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2481 int ret;
2482
2483 if (hdev->ll_driver->max_buffer_size)
2484 max_buffer_size = hdev->ll_driver->max_buffer_size;
2485
2486 if (len < 1 || len > max_buffer_size || !buf)
2487 return -EINVAL;
2488
2489 ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2490 reqtype, source, from_bpf);
2491 if (ret)
2492 return ret;
2493
2494 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2495 rtype, reqtype);
2496 }
2497
2498 /**
2499 * hid_hw_raw_request - send report request to device
2500 *
2501 * @hdev: hid device
2502 * @reportnum: report ID
2503 * @buf: in/out data to transfer
2504 * @len: length of buf
2505 * @rtype: HID report type
2506 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2507 *
2508 * Return: count of data transferred, negative if error
2509 *
2510 * Same behavior as hid_hw_request, but with raw buffers instead.
2511 */
2512 int hid_hw_raw_request(struct hid_device *hdev,
2513 unsigned char reportnum, __u8 *buf,
2514 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2515 {
2516 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2517 }
2518 EXPORT_SYMBOL_GPL(hid_hw_raw_request);
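
/*
 * Illustrative sketch only: reading a feature report synchronously with
 * hid_hw_raw_request(). The report id (0x0f) and length (32) are
 * hypothetical; the buffer must be kmalloc'ed (DMA-able), not on the stack.
 */
static int __maybe_unused example_get_feature(struct hid_device *hdev)
{
	u8 *buf;
	int ret;

	buf = kzalloc(32, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hid_hw_raw_request(hdev, 0x0f, buf, 32, HID_FEATURE_REPORT,
				 HID_REQ_GET_REPORT);
	if (ret < 0)
		hid_err(hdev, "failed to read feature report: %d\n", ret);

	/* on success, ret is the number of bytes actually transferred */
	kfree(buf);
	return ret < 0 ? ret : 0;
}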
2519
2520 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2521 bool from_bpf)
2522 {
2523 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2524 int ret;
2525
2526 if (hdev->ll_driver->max_buffer_size)
2527 max_buffer_size = hdev->ll_driver->max_buffer_size;
2528
2529 if (len < 1 || len > max_buffer_size || !buf)
2530 return -EINVAL;
2531
2532 ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2533 if (ret)
2534 return ret;
2535
2536 if (hdev->ll_driver->output_report)
2537 return hdev->ll_driver->output_report(hdev, buf, len);
2538
2539 return -ENOSYS;
2540 }
2541
2542 /**
2543 * hid_hw_output_report - send output report to device
2544 *
2545 * @hdev: hid device
2546 * @buf: raw data to transfer
2547 * @len: length of buf
2548 *
2549 * Return: count of data transferred, negative if error
2550 */
2551 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2552 {
2553 return __hid_hw_output_report(hdev, buf, len, 0, false);
2554 }
2555 EXPORT_SYMBOL_GPL(hid_hw_output_report);
2556
2557 #ifdef CONFIG_PM
2558 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2559 {
2560 if (hdev->driver && hdev->driver->suspend)
2561 return hdev->driver->suspend(hdev, state);
2562
2563 return 0;
2564 }
2565 EXPORT_SYMBOL_GPL(hid_driver_suspend);
2566
2567 int hid_driver_reset_resume(struct hid_device *hdev)
2568 {
2569 if (hdev->driver && hdev->driver->reset_resume)
2570 return hdev->driver->reset_resume(hdev);
2571
2572 return 0;
2573 }
2574 EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2575
2576 int hid_driver_resume(struct hid_device *hdev)
2577 {
2578 if (hdev->driver && hdev->driver->resume)
2579 return hdev->driver->resume(hdev);
2580
2581 return 0;
2582 }
2583 EXPORT_SYMBOL_GPL(hid_driver_resume);
2584 #endif /* CONFIG_PM */
2585
2586 struct hid_dynid {
2587 struct list_head list;
2588 struct hid_device_id id;
2589 };
2590
2591 /**
2592 * new_id_store - add a new HID device ID to this driver and re-probe devices
2593 * @drv: target device driver
2594 * @buf: buffer for scanning device ID data
2595 * @count: input size
2596 *
2597 * Adds a new dynamic hid device ID to this driver,
2598 * and causes the driver to probe for all devices again.
2599 */
2600 static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2601 size_t count)
2602 {
2603 struct hid_driver *hdrv = to_hid_driver(drv);
2604 struct hid_dynid *dynid;
2605 __u32 bus, vendor, product;
2606 unsigned long driver_data = 0;
2607 int ret;
2608
2609 ret = sscanf(buf, "%x %x %x %lx",
2610 &bus, &vendor, &product, &driver_data);
2611 if (ret < 3)
2612 return -EINVAL;
2613
2614 dynid = kzalloc_obj(*dynid, GFP_KERNEL);
2615 if (!dynid)
2616 return -ENOMEM;
2617
2618 dynid->id.bus = bus;
2619 dynid->id.group = HID_GROUP_ANY;
2620 dynid->id.vendor = vendor;
2621 dynid->id.product = product;
2622 dynid->id.driver_data = driver_data;
2623
2624 spin_lock(&hdrv->dyn_lock);
2625 list_add_tail(&dynid->list, &hdrv->dyn_list);
2626 spin_unlock(&hdrv->dyn_lock);
2627
2628 ret = driver_attach(&hdrv->driver);
2629
2630 return ret ? : count;
2631 }
2632 static DRIVER_ATTR_WO(new_id);
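
/*
 * Usage example (the bus/vendor/product values are hypothetical): bind a
 * driver to an extra id triple at run time, optionally with driver_data:
 *
 *	echo "0003 046d c52b" > /sys/bus/hid/drivers/<driver>/new_id
 */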
2633
2634 static struct attribute *hid_drv_attrs[] = {
2635 &driver_attr_new_id.attr,
2636 NULL,
2637 };
2638 ATTRIBUTE_GROUPS(hid_drv);
2639
2640 static void hid_free_dynids(struct hid_driver *hdrv)
2641 {
2642 struct hid_dynid *dynid, *n;
2643
2644 spin_lock(&hdrv->dyn_lock);
2645 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2646 list_del(&dynid->list);
2647 kfree(dynid);
2648 }
2649 spin_unlock(&hdrv->dyn_lock);
2650 }
2651
2652 const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2653 struct hid_driver *hdrv)
2654 {
2655 struct hid_dynid *dynid;
2656
2657 spin_lock(&hdrv->dyn_lock);
2658 list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2659 if (hid_match_one_id(hdev, &dynid->id)) {
2660 spin_unlock(&hdrv->dyn_lock);
2661 return &dynid->id;
2662 }
2663 }
2664 spin_unlock(&hdrv->dyn_lock);
2665
2666 return hid_match_id(hdev, hdrv->id_table);
2667 }
2668 EXPORT_SYMBOL_GPL(hid_match_device);
2669
2670 static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2671 {
2672 struct hid_driver *hdrv = to_hid_driver(drv);
2673 struct hid_device *hdev = to_hid_device(dev);
2674
2675 return hid_match_device(hdev, hdrv) != NULL;
2676 }
2677
2678 /**
2679 * hid_compare_device_paths - check if both devices share the same path
2680 * @hdev_a: hid device
2681 * @hdev_b: hid device
2682 * @separator: char to use as separator
2683 *
2684 * Check if two devices share the same path up to the last occurrence of
2685 * the separator char. Both paths must exist (i.e., zero-length paths
2686 * don't match).
2687 */
2688 bool hid_compare_device_paths(struct hid_device *hdev_a,
2689 struct hid_device *hdev_b, char separator)
2690 {
2691 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2692 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2693
2694 if (n1 != n2 || n1 <= 0 || n2 <= 0)
2695 return false;
2696
2697 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2698 }
2699 EXPORT_SYMBOL_GPL(hid_compare_device_paths);
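
/*
 * Illustrative sketch only: a driver pairing two interfaces of the same
 * physical device (e.g. a receiver exposing several HID interfaces) could
 * check that both hid_devices hang off the same parent path. The function
 * name is hypothetical.
 */
static bool __maybe_unused example_same_physical_device(struct hid_device *a,
							struct hid_device *b)
{
	/* same path up to the last '/' => same physical device */
	return hid_compare_device_paths(a, b, '/');
}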
2700
2701 static bool hid_check_device_match(struct hid_device *hdev,
2702 struct hid_driver *hdrv,
2703 const struct hid_device_id **id)
2704 {
2705 *id = hid_match_device(hdev, hdrv);
2706 if (!*id)
2707 return false;
2708
2709 if (hdrv->match)
2710 return hdrv->match(hdev, hid_ignore_special_drivers);
2711
2712 /*
2713 * hid-generic implements .match(), so we must be dealing with a
2714 * different HID driver here, and can simply check if
2715 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2716 * are set or not.
2717 */
2718 return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2719 }
2720
2721 static void hid_set_group(struct hid_device *hdev)
2722 {
2723 int ret;
2724
2725 if (hid_ignore_special_drivers) {
2726 hdev->group = HID_GROUP_GENERIC;
2727 } else if (!hdev->group &&
2728 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2729 ret = hid_scan_report(hdev);
2730 if (ret)
2731 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2732 }
2733 }
2734
2735 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2736 {
2737 const struct hid_device_id *id;
2738 int ret;
2739
2740 if (!hdev->bpf_rsize) {
2741 /* we keep a reference to the currently scanned report descriptor */
2742 const __u8 *original_rdesc = hdev->bpf_rdesc;
2743
2744 if (!original_rdesc)
2745 original_rdesc = hdev->dev_rdesc;
2746
2747 /* in case a bpf program gets detached, we need to free the old one */
2748 hid_free_bpf_rdesc(hdev);
2749
2750 /* keep this around so we know we called it once */
2751 hdev->bpf_rsize = hdev->dev_rsize;
2752
2753 /* call_hid_bpf_rdesc_fixup will always return a valid pointer */
2754 hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
2755 &hdev->bpf_rsize);
2756
2757 /* the report descriptor changed, we need to re-scan it */
2758 if (original_rdesc != hdev->bpf_rdesc) {
2759 hdev->group = 0;
2760 hid_set_group(hdev);
2761 }
2762 }
2763
2764 if (!hid_check_device_match(hdev, hdrv, &id))
2765 return -ENODEV;
2766
2767 hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2768 if (!hdev->devres_group_id)
2769 return -ENOMEM;
2770
2771 /* reset the quirks that have been previously set */
2772 hdev->quirks = hid_lookup_quirk(hdev);
2773 hdev->driver = hdrv;
2774
2775 if (hdrv->probe) {
2776 ret = hdrv->probe(hdev, id);
2777 } else { /* default probe */
2778 ret = hid_open_report(hdev);
2779 if (!ret)
2780 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2781 }
2782
2783 /*
2784 * Note that we are not closing the devres group opened above so
2785 * even resources that were attached to the device after probe is
2786 * run are released when hid_device_remove() is executed. This is
2787 * needed as some drivers would allocate additional resources,
2788 * for example when updating firmware.
2789 */
2790
2791 if (ret) {
2792 devres_release_group(&hdev->dev, hdev->devres_group_id);
2793 hid_close_report(hdev);
2794 hdev->driver = NULL;
2795 }
2796
2797 return ret;
2798 }
2799
2800 static int hid_device_probe(struct device *dev)
2801 {
2802 struct hid_device *hdev = to_hid_device(dev);
2803 struct hid_driver *hdrv = to_hid_driver(dev->driver);
2804 int ret = 0;
2805
2806 if (down_interruptible(&hdev->driver_input_lock))
2807 return -EINTR;
2808
2809 hdev->io_started = false;
2810 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2811
2812 if (!hdev->driver)
2813 ret = __hid_device_probe(hdev, hdrv);
2814
2815 if (!hdev->io_started)
2816 up(&hdev->driver_input_lock);
2817
2818 return ret;
2819 }
2820
2821 static void hid_device_remove(struct device *dev)
2822 {
2823 struct hid_device *hdev = to_hid_device(dev);
2824 struct hid_driver *hdrv;
2825
2826 down(&hdev->driver_input_lock);
2827 hdev->io_started = false;
2828
2829 hdrv = hdev->driver;
2830 if (hdrv) {
2831 if (hdrv->remove)
2832 hdrv->remove(hdev);
2833 else /* default remove */
2834 hid_hw_stop(hdev);
2835
2836 /* Release all devres resources allocated by the driver */
2837 devres_release_group(&hdev->dev, hdev->devres_group_id);
2838
2839 hid_close_report(hdev);
2840 hdev->driver = NULL;
2841 }
2842
2843 if (!hdev->io_started)
2844 up(&hdev->driver_input_lock);
2845 }
2846
2847 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2848 char *buf)
2849 {
2850 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2851
2852 return sysfs_emit(buf, "hid:b%04Xg%04Xv%08Xp%08X\n",
2853 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2854 }
2855 static DEVICE_ATTR_RO(modalias);
2856
2857 static struct attribute *hid_dev_attrs[] = {
2858 &dev_attr_modalias.attr,
2859 NULL,
2860 };
2861 static const struct bin_attribute *hid_dev_bin_attrs[] = {
2862 &bin_attr_report_descriptor,
2863 NULL
2864 };
2865 static const struct attribute_group hid_dev_group = {
2866 .attrs = hid_dev_attrs,
2867 .bin_attrs = hid_dev_bin_attrs,
2868 };
2869 __ATTRIBUTE_GROUPS(hid_dev);
2870
2871 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2872 {
2873 const struct hid_device *hdev = to_hid_device(dev);
2874
2875 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2876 hdev->bus, hdev->vendor, hdev->product))
2877 return -ENOMEM;
2878
2879 if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2880 return -ENOMEM;
2881
2882 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2883 return -ENOMEM;
2884
2885 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2886 return -ENOMEM;
2887
2888 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2889 hdev->bus, hdev->group, hdev->vendor, hdev->product))
2890 return -ENOMEM;
2891
2892 return 0;
2893 }
2894
2895 const struct bus_type hid_bus_type = {
2896 .name = "hid",
2897 .dev_groups = hid_dev_groups,
2898 .drv_groups = hid_drv_groups,
2899 .match = hid_bus_match,
2900 .probe = hid_device_probe,
2901 .remove = hid_device_remove,
2902 .uevent = hid_uevent,
2903 };
2904 EXPORT_SYMBOL(hid_bus_type);
2905
2906 int hid_add_device(struct hid_device *hdev)
2907 {
2908 static atomic_t id = ATOMIC_INIT(0);
2909 int ret;
2910
2911 if (WARN_ON(hdev->status & HID_STAT_ADDED))
2912 return -EBUSY;
2913
2914 hdev->quirks = hid_lookup_quirk(hdev);
2915
2916 /* we need to kill them here, otherwise they would stay allocated,
2917 * waiting for a matching driver */
2918 if (hid_ignore(hdev))
2919 return -ENODEV;
2920
2921 /*
2922 * Check for the mandatory transport channel.
2923 */
2924 if (!hdev->ll_driver->raw_request) {
2925 hid_err(hdev, "transport driver missing .raw_request()\n");
2926 return -EINVAL;
2927 }
2928
2929 /*
2930 * Read the device report descriptor once and use it as a template
2931 * for the driver-specific modifications.
2932 */
2933 ret = hdev->ll_driver->parse(hdev);
2934 if (ret)
2935 return ret;
2936 if (!hdev->dev_rdesc)
2937 return -ENODEV;
2938
2939 /*
2940 * Scan generic devices for group information
2941 */
2942 hid_set_group(hdev);
2943
2944 hdev->id = atomic_inc_return(&id);
2945
2946 /* XXX hack, any other cleaner solution after the driver core
2947 * is converted to allow more than 20 bytes as the device name? */
2948 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2949 hdev->vendor, hdev->product, hdev->id);
2950
2951 hid_debug_register(hdev, dev_name(&hdev->dev));
2952 ret = device_add(&hdev->dev);
2953 if (!ret)
2954 hdev->status |= HID_STAT_ADDED;
2955 else
2956 hid_debug_unregister(hdev);
2957
2958 return ret;
2959 }
2960 EXPORT_SYMBOL_GPL(hid_add_device);
2961
2962 /**
2963 * hid_allocate_device - allocate new hid device descriptor
2964 *
2965 * Allocate and initialize hid device, so that hid_destroy_device might be
2966 * used to free it.
2967 *
2968 * Returns a new hid_device pointer on success, otherwise an ERR_PTR-encoded
2969 * error value.
2970 */
2971 struct hid_device *hid_allocate_device(void)
2972 {
2973 struct hid_device *hdev;
2974 int ret = -ENOMEM;
2975
2976 hdev = kzalloc_obj(*hdev, GFP_KERNEL);
2977 if (hdev == NULL)
2978 return ERR_PTR(ret);
2979
2980 device_initialize(&hdev->dev);
2981 hdev->dev.release = hid_device_release;
2982 hdev->dev.bus = &hid_bus_type;
2983 device_enable_async_suspend(&hdev->dev);
2984
2985 hid_close_report(hdev);
2986
2987 init_waitqueue_head(&hdev->debug_wait);
2988 INIT_LIST_HEAD(&hdev->debug_list);
2989 spin_lock_init(&hdev->debug_list_lock);
2990 sema_init(&hdev->driver_input_lock, 1);
2991 mutex_init(&hdev->ll_open_lock);
2992 kref_init(&hdev->ref);
2993
2994 ret = hid_bpf_device_init(hdev);
2995 if (ret)
2996 goto out_err;
2997
2998 return hdev;
2999
3000 out_err:
3001 hid_destroy_device(hdev);
3002 return ERR_PTR(ret);
3003 }
3004 EXPORT_SYMBOL_GPL(hid_allocate_device);
3005
3006 static void hid_remove_device(struct hid_device *hdev)
3007 {
3008 if (hdev->status & HID_STAT_ADDED) {
3009 device_del(&hdev->dev);
3010 hid_debug_unregister(hdev);
3011 hdev->status &= ~HID_STAT_ADDED;
3012 }
3013 hid_free_bpf_rdesc(hdev);
3014 kfree(hdev->dev_rdesc);
3015 hdev->dev_rdesc = NULL;
3016 hdev->dev_rsize = 0;
3017 hdev->bpf_rsize = 0;
3018 }
3019
3020 /**
3021 * hid_destroy_device - free previously allocated device
3022 *
3023 * @hdev: hid device
3024 *
3025 * If you allocated the hid_device through hid_allocate_device(), you should
3026 * only ever free it with this function.
3027 */
3028 void hid_destroy_device(struct hid_device *hdev)
3029 {
3030 hid_bpf_destroy_device(hdev);
3031 hid_remove_device(hdev);
3032 put_device(&hdev->dev);
3033 }
3034 EXPORT_SYMBOL_GPL(hid_destroy_device);
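
/*
 * Illustrative sketch only: the allocate/add/destroy life cycle as seen from
 * a transport (ll) driver. The function name, device name and the ll_driver
 * argument are placeholders; a real transport also fills in dev.parent,
 * vendor/product, ->phys and friends before calling hid_add_device().
 */
static __maybe_unused struct hid_device *example_register_transport(struct hid_ll_driver *ll)
{
	struct hid_device *hdev;
	int ret;

	hdev = hid_allocate_device();
	if (IS_ERR(hdev))
		return hdev;

	hdev->ll_driver = ll;
	hdev->bus = BUS_VIRTUAL;
	strscpy(hdev->name, "Example HID device", sizeof(hdev->name));

	ret = hid_add_device(hdev);
	if (ret) {
		hid_destroy_device(hdev);
		return ERR_PTR(ret);
	}

	return hdev;
}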
3035
3036
3037 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
3038 {
3039 struct hid_driver *hdrv = data;
3040 struct hid_device *hdev = to_hid_device(dev);
3041
3042 if (hdev->driver == hdrv &&
3043 !hdrv->match(hdev, hid_ignore_special_drivers) &&
3044 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
3045 return device_reprobe(dev);
3046
3047 return 0;
3048 }
3049
3050 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3051 {
3052 struct hid_driver *hdrv = to_hid_driver(drv);
3053
3054 if (hdrv->match) {
3055 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3056 __hid_bus_reprobe_drivers);
3057 }
3058
3059 return 0;
3060 }
3061
3062 static int __bus_removed_driver(struct device_driver *drv, void *data)
3063 {
3064 return bus_rescan_devices(&hid_bus_type);
3065 }
3066
3067 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3068 const char *mod_name)
3069 {
3070 int ret;
3071
3072 hdrv->driver.name = hdrv->name;
3073 hdrv->driver.bus = &hid_bus_type;
3074 hdrv->driver.owner = owner;
3075 hdrv->driver.mod_name = mod_name;
3076
3077 INIT_LIST_HEAD(&hdrv->dyn_list);
3078 spin_lock_init(&hdrv->dyn_lock);
3079
3080 ret = driver_register(&hdrv->driver);
3081
3082 if (ret == 0)
3083 bus_for_each_drv(&hid_bus_type, NULL, NULL,
3084 __hid_bus_driver_added);
3085
3086 return ret;
3087 }
3088 EXPORT_SYMBOL_GPL(__hid_register_driver);
3089
3090 void hid_unregister_driver(struct hid_driver *hdrv)
3091 {
3092 driver_unregister(&hdrv->driver);
3093 hid_free_dynids(hdrv);
3094
3095 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3096 }
3097 EXPORT_SYMBOL_GPL(hid_unregister_driver);
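
/*
 * Illustrative sketch only: the minimal boilerplate of a special HID driver
 * built on the registration helpers above. The vendor/product ids and the
 * driver name are placeholders, and probe/remove callbacks are omitted. A
 * standalone module would finish with module_hid_driver(example_hid_driver),
 * which expands to the __hid_register_driver()/hid_unregister_driver() pair.
 */
static const struct hid_device_id example_devices[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(hid, example_devices);

static struct hid_driver example_hid_driver __maybe_unused = {
	.name = "example-hid",
	.id_table = example_devices,
};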
3098
3099 int hid_check_keys_pressed(struct hid_device *hid)
3100 {
3101 struct hid_input *hidinput;
3102 int i;
3103
3104 if (!(hid->claimed & HID_CLAIMED_INPUT))
3105 return 0;
3106
3107 list_for_each_entry(hidinput, &hid->inputs, list) {
3108 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3109 if (hidinput->input->key[i])
3110 return 1;
3111 }
3112
3113 return 0;
3114 }
3115 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3116
3117 #ifdef CONFIG_HID_BPF
3118 static const struct hid_ops __hid_ops = {
3119 .hid_get_report = hid_get_report,
3120 .hid_hw_raw_request = __hid_hw_raw_request,
3121 .hid_hw_output_report = __hid_hw_output_report,
3122 .hid_input_report = __hid_input_report,
3123 .owner = THIS_MODULE,
3124 .bus_type = &hid_bus_type,
3125 };
3126 #endif
3127
3128 static int __init hid_init(void)
3129 {
3130 int ret;
3131
3132 ret = bus_register(&hid_bus_type);
3133 if (ret) {
3134 pr_err("can't register hid bus\n");
3135 goto err;
3136 }
3137
3138 #ifdef CONFIG_HID_BPF
3139 hid_ops = &__hid_ops;
3140 #endif
3141
3142 ret = hidraw_init();
3143 if (ret)
3144 goto err_bus;
3145
3146 hid_debug_init();
3147
3148 return 0;
3149 err_bus:
3150 bus_unregister(&hid_bus_type);
3151 err:
3152 return ret;
3153 }
3154
3155 static void __exit hid_exit(void)
3156 {
3157 #ifdef CONFIG_HID_BPF
3158 hid_ops = NULL;
3159 #endif
3160 hid_debug_exit();
3161 hidraw_exit();
3162 bus_unregister(&hid_bus_type);
3163 hid_quirks_exit(HID_BUS_ANY);
3164 }
3165
3166 module_init(hid_init);
3167 module_exit(hid_exit);
3168
3169 MODULE_AUTHOR("Andreas Gal");
3170 MODULE_AUTHOR("Vojtech Pavlik");
3171 MODULE_AUTHOR("Jiri Kosina");
3172 MODULE_DESCRIPTION("HID support for Linux");
3173 MODULE_LICENSE("GPL");
3174