1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * HID support for Linux 4 * 5 * Copyright (c) 1999 Andreas Gal 6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 8 * Copyright (c) 2006-2012 Jiri Kosina 9 */ 10 11 /* 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/kernel.h> 20 #include <linux/list.h> 21 #include <linux/mm.h> 22 #include <linux/spinlock.h> 23 #include <asm/unaligned.h> 24 #include <asm/byteorder.h> 25 #include <linux/input.h> 26 #include <linux/wait.h> 27 #include <linux/vmalloc.h> 28 #include <linux/sched.h> 29 #include <linux/semaphore.h> 30 31 #include <linux/hid.h> 32 #include <linux/hiddev.h> 33 #include <linux/hid-debug.h> 34 #include <linux/hidraw.h> 35 36 #include "hid-ids.h" 37 38 /* 39 * Version Information 40 */ 41 42 #define DRIVER_DESC "HID core driver" 43 44 static int hid_ignore_special_drivers = 0; 45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600); 46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver"); 47 48 /* 49 * Register a new report for a device. 50 */ 51 52 struct hid_report *hid_register_report(struct hid_device *device, 53 enum hid_report_type type, unsigned int id, 54 unsigned int application) 55 { 56 struct hid_report_enum *report_enum = device->report_enum + type; 57 struct hid_report *report; 58 59 if (id >= HID_MAX_IDS) 60 return NULL; 61 if (report_enum->report_id_hash[id]) 62 return report_enum->report_id_hash[id]; 63 64 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL); 65 if (!report) 66 return NULL; 67 68 if (id != 0) 69 report_enum->numbered = 1; 70 71 report->id = id; 72 report->type = type; 73 report->size = 0; 74 report->device = device; 75 report->application = application; 76 report_enum->report_id_hash[id] = report; 77 78 list_add_tail(&report->list, &report_enum->report_list); 79 INIT_LIST_HEAD(&report->field_entry_list); 80 81 return report; 82 } 83 EXPORT_SYMBOL_GPL(hid_register_report); 84 85 /* 86 * Register a new field for this report. 87 */ 88 89 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) 90 { 91 struct hid_field *field; 92 93 if (report->maxfield == HID_MAX_FIELDS) { 94 hid_err(report->device, "too many fields in report\n"); 95 return NULL; 96 } 97 98 field = kzalloc((sizeof(struct hid_field) + 99 usages * sizeof(struct hid_usage) + 100 3 * usages * sizeof(unsigned int)), GFP_KERNEL); 101 if (!field) 102 return NULL; 103 104 field->index = report->maxfield++; 105 report->field[field->index] = field; 106 field->usage = (struct hid_usage *)(field + 1); 107 field->value = (s32 *)(field->usage + usages); 108 field->new_value = (s32 *)(field->value + usages); 109 field->usages_priorities = (s32 *)(field->new_value + usages); 110 field->report = report; 111 112 return field; 113 } 114 115 /* 116 * Open a collection. The type/usage is pushed on the stack. 
117 */ 118 119 static int open_collection(struct hid_parser *parser, unsigned type) 120 { 121 struct hid_collection *collection; 122 unsigned usage; 123 int collection_index; 124 125 usage = parser->local.usage[0]; 126 127 if (parser->collection_stack_ptr == parser->collection_stack_size) { 128 unsigned int *collection_stack; 129 unsigned int new_size = parser->collection_stack_size + 130 HID_COLLECTION_STACK_SIZE; 131 132 collection_stack = krealloc(parser->collection_stack, 133 new_size * sizeof(unsigned int), 134 GFP_KERNEL); 135 if (!collection_stack) 136 return -ENOMEM; 137 138 parser->collection_stack = collection_stack; 139 parser->collection_stack_size = new_size; 140 } 141 142 if (parser->device->maxcollection == parser->device->collection_size) { 143 collection = kmalloc( 144 array3_size(sizeof(struct hid_collection), 145 parser->device->collection_size, 146 2), 147 GFP_KERNEL); 148 if (collection == NULL) { 149 hid_err(parser->device, "failed to reallocate collection array\n"); 150 return -ENOMEM; 151 } 152 memcpy(collection, parser->device->collection, 153 sizeof(struct hid_collection) * 154 parser->device->collection_size); 155 memset(collection + parser->device->collection_size, 0, 156 sizeof(struct hid_collection) * 157 parser->device->collection_size); 158 kfree(parser->device->collection); 159 parser->device->collection = collection; 160 parser->device->collection_size *= 2; 161 } 162 163 parser->collection_stack[parser->collection_stack_ptr++] = 164 parser->device->maxcollection; 165 166 collection_index = parser->device->maxcollection++; 167 collection = parser->device->collection + collection_index; 168 collection->type = type; 169 collection->usage = usage; 170 collection->level = parser->collection_stack_ptr - 1; 171 collection->parent_idx = (collection->level == 0) ? -1 : 172 parser->collection_stack[collection->level - 1]; 173 174 if (type == HID_COLLECTION_APPLICATION) 175 parser->device->maxapplication++; 176 177 return 0; 178 } 179 180 /* 181 * Close a collection. 182 */ 183 184 static int close_collection(struct hid_parser *parser) 185 { 186 if (!parser->collection_stack_ptr) { 187 hid_err(parser->device, "collection stack underflow\n"); 188 return -EINVAL; 189 } 190 parser->collection_stack_ptr--; 191 return 0; 192 } 193 194 /* 195 * Climb up the stack, search for the specified collection type 196 * and return the usage. 197 */ 198 199 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) 200 { 201 struct hid_collection *collection = parser->device->collection; 202 int n; 203 204 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) { 205 unsigned index = parser->collection_stack[n]; 206 if (collection[index].type == type) 207 return collection[index].usage; 208 } 209 return 0; /* we know nothing about this usage type */ 210 } 211 212 /* 213 * Concatenate usage which defines 16 bits or less with the 214 * currently defined usage page to form a 32 bit usage 215 */ 216 217 static void complete_usage(struct hid_parser *parser, unsigned int index) 218 { 219 parser->local.usage[index] &= 0xFFFF; 220 parser->local.usage[index] |= 221 (parser->global.usage_page & 0xFFFF) << 16; 222 } 223 224 /* 225 * Add a usage to the temporary parser table. 
226 */ 227 228 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) 229 { 230 if (parser->local.usage_index >= HID_MAX_USAGES) { 231 hid_err(parser->device, "usage index exceeded\n"); 232 return -1; 233 } 234 parser->local.usage[parser->local.usage_index] = usage; 235 236 /* 237 * If Usage item only includes usage id, concatenate it with 238 * currently defined usage page 239 */ 240 if (size <= 2) 241 complete_usage(parser, parser->local.usage_index); 242 243 parser->local.usage_size[parser->local.usage_index] = size; 244 parser->local.collection_index[parser->local.usage_index] = 245 parser->collection_stack_ptr ? 246 parser->collection_stack[parser->collection_stack_ptr - 1] : 0; 247 parser->local.usage_index++; 248 return 0; 249 } 250 251 /* 252 * Register a new field for this report. 253 */ 254 255 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags) 256 { 257 struct hid_report *report; 258 struct hid_field *field; 259 unsigned int usages; 260 unsigned int offset; 261 unsigned int i; 262 unsigned int application; 263 264 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 265 266 report = hid_register_report(parser->device, report_type, 267 parser->global.report_id, application); 268 if (!report) { 269 hid_err(parser->device, "hid_register_report failed\n"); 270 return -1; 271 } 272 273 /* Handle both signed and unsigned cases properly */ 274 if ((parser->global.logical_minimum < 0 && 275 parser->global.logical_maximum < 276 parser->global.logical_minimum) || 277 (parser->global.logical_minimum >= 0 && 278 (__u32)parser->global.logical_maximum < 279 (__u32)parser->global.logical_minimum)) { 280 dbg_hid("logical range invalid 0x%x 0x%x\n", 281 parser->global.logical_minimum, 282 parser->global.logical_maximum); 283 return -1; 284 } 285 286 offset = report->size; 287 report->size += parser->global.report_size * parser->global.report_count; 288 289 /* Total size check: Allow for possible report index byte */ 290 if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { 291 hid_err(parser->device, "report is too long\n"); 292 return -1; 293 } 294 295 if (!parser->local.usage_index) /* Ignore padding fields */ 296 return 0; 297 298 usages = max_t(unsigned, parser->local.usage_index, 299 parser->global.report_count); 300 301 field = hid_register_field(report, usages); 302 if (!field) 303 return 0; 304 305 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL); 306 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL); 307 field->application = application; 308 309 for (i = 0; i < usages; i++) { 310 unsigned j = i; 311 /* Duplicate the last usage we parsed if we have excess values */ 312 if (i >= parser->local.usage_index) 313 j = parser->local.usage_index - 1; 314 field->usage[i].hid = parser->local.usage[j]; 315 field->usage[i].collection_index = 316 parser->local.collection_index[j]; 317 field->usage[i].usage_index = i; 318 field->usage[i].resolution_multiplier = 1; 319 } 320 321 field->maxusage = usages; 322 field->flags = flags; 323 field->report_offset = offset; 324 field->report_type = report_type; 325 field->report_size = parser->global.report_size; 326 field->report_count = parser->global.report_count; 327 field->logical_minimum = parser->global.logical_minimum; 328 field->logical_maximum = parser->global.logical_maximum; 329 field->physical_minimum = parser->global.physical_minimum; 330 field->physical_maximum = parser->global.physical_maximum; 331 field->unit_exponent = 
parser->global.unit_exponent; 332 field->unit = parser->global.unit; 333 334 return 0; 335 } 336 337 /* 338 * Read data value from item. 339 */ 340 341 static u32 item_udata(struct hid_item *item) 342 { 343 switch (item->size) { 344 case 1: return item->data.u8; 345 case 2: return item->data.u16; 346 case 4: return item->data.u32; 347 } 348 return 0; 349 } 350 351 static s32 item_sdata(struct hid_item *item) 352 { 353 switch (item->size) { 354 case 1: return item->data.s8; 355 case 2: return item->data.s16; 356 case 4: return item->data.s32; 357 } 358 return 0; 359 } 360 361 /* 362 * Process a global item. 363 */ 364 365 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 366 { 367 __s32 raw_value; 368 switch (item->tag) { 369 case HID_GLOBAL_ITEM_TAG_PUSH: 370 371 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { 372 hid_err(parser->device, "global environment stack overflow\n"); 373 return -1; 374 } 375 376 memcpy(parser->global_stack + parser->global_stack_ptr++, 377 &parser->global, sizeof(struct hid_global)); 378 return 0; 379 380 case HID_GLOBAL_ITEM_TAG_POP: 381 382 if (!parser->global_stack_ptr) { 383 hid_err(parser->device, "global environment stack underflow\n"); 384 return -1; 385 } 386 387 memcpy(&parser->global, parser->global_stack + 388 --parser->global_stack_ptr, sizeof(struct hid_global)); 389 return 0; 390 391 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE: 392 parser->global.usage_page = item_udata(item); 393 return 0; 394 395 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM: 396 parser->global.logical_minimum = item_sdata(item); 397 return 0; 398 399 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM: 400 if (parser->global.logical_minimum < 0) 401 parser->global.logical_maximum = item_sdata(item); 402 else 403 parser->global.logical_maximum = item_udata(item); 404 return 0; 405 406 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM: 407 parser->global.physical_minimum = item_sdata(item); 408 return 0; 409 410 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM: 411 if (parser->global.physical_minimum < 0) 412 parser->global.physical_maximum = item_sdata(item); 413 else 414 parser->global.physical_maximum = item_udata(item); 415 return 0; 416 417 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 418 /* Many devices provide unit exponent as a two's complement 419 * nibble due to the common misunderstanding of HID 420 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle 421 * both this and the standard encoding. 
*/ 422 raw_value = item_sdata(item); 423 if (!(raw_value & 0xfffffff0)) 424 parser->global.unit_exponent = hid_snto32(raw_value, 4); 425 else 426 parser->global.unit_exponent = raw_value; 427 return 0; 428 429 case HID_GLOBAL_ITEM_TAG_UNIT: 430 parser->global.unit = item_udata(item); 431 return 0; 432 433 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: 434 parser->global.report_size = item_udata(item); 435 if (parser->global.report_size > 256) { 436 hid_err(parser->device, "invalid report_size %d\n", 437 parser->global.report_size); 438 return -1; 439 } 440 return 0; 441 442 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT: 443 parser->global.report_count = item_udata(item); 444 if (parser->global.report_count > HID_MAX_USAGES) { 445 hid_err(parser->device, "invalid report_count %d\n", 446 parser->global.report_count); 447 return -1; 448 } 449 return 0; 450 451 case HID_GLOBAL_ITEM_TAG_REPORT_ID: 452 parser->global.report_id = item_udata(item); 453 if (parser->global.report_id == 0 || 454 parser->global.report_id >= HID_MAX_IDS) { 455 hid_err(parser->device, "report_id %u is invalid\n", 456 parser->global.report_id); 457 return -1; 458 } 459 return 0; 460 461 default: 462 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag); 463 return -1; 464 } 465 } 466 467 /* 468 * Process a local item. 469 */ 470 471 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) 472 { 473 __u32 data; 474 unsigned n; 475 __u32 count; 476 477 data = item_udata(item); 478 479 switch (item->tag) { 480 case HID_LOCAL_ITEM_TAG_DELIMITER: 481 482 if (data) { 483 /* 484 * We treat items before the first delimiter 485 * as global to all usage sets (branch 0). 486 * In the moment we process only these global 487 * items and the first delimiter set. 488 */ 489 if (parser->local.delimiter_depth != 0) { 490 hid_err(parser->device, "nested delimiters\n"); 491 return -1; 492 } 493 parser->local.delimiter_depth++; 494 parser->local.delimiter_branch++; 495 } else { 496 if (parser->local.delimiter_depth < 1) { 497 hid_err(parser->device, "bogus close delimiter\n"); 498 return -1; 499 } 500 parser->local.delimiter_depth--; 501 } 502 return 0; 503 504 case HID_LOCAL_ITEM_TAG_USAGE: 505 506 if (parser->local.delimiter_branch > 1) { 507 dbg_hid("alternative usage ignored\n"); 508 return 0; 509 } 510 511 return hid_add_usage(parser, data, item->size); 512 513 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: 514 515 if (parser->local.delimiter_branch > 1) { 516 dbg_hid("alternative usage ignored\n"); 517 return 0; 518 } 519 520 parser->local.usage_minimum = data; 521 return 0; 522 523 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM: 524 525 if (parser->local.delimiter_branch > 1) { 526 dbg_hid("alternative usage ignored\n"); 527 return 0; 528 } 529 530 count = data - parser->local.usage_minimum; 531 if (count + parser->local.usage_index >= HID_MAX_USAGES) { 532 /* 533 * We do not warn if the name is not set, we are 534 * actually pre-scanning the device. 
535 */ 536 if (dev_name(&parser->device->dev)) 537 hid_warn(parser->device, 538 "ignoring exceeding usage max\n"); 539 data = HID_MAX_USAGES - parser->local.usage_index + 540 parser->local.usage_minimum - 1; 541 if (data <= 0) { 542 hid_err(parser->device, 543 "no more usage index available\n"); 544 return -1; 545 } 546 } 547 548 for (n = parser->local.usage_minimum; n <= data; n++) 549 if (hid_add_usage(parser, n, item->size)) { 550 dbg_hid("hid_add_usage failed\n"); 551 return -1; 552 } 553 return 0; 554 555 default: 556 557 dbg_hid("unknown local item tag 0x%x\n", item->tag); 558 return 0; 559 } 560 return 0; 561 } 562 563 /* 564 * Concatenate Usage Pages into Usages where relevant: 565 * As per specification, 6.2.2.8: "When the parser encounters a main item it 566 * concatenates the last declared Usage Page with a Usage to form a complete 567 * usage value." 568 */ 569 570 static void hid_concatenate_last_usage_page(struct hid_parser *parser) 571 { 572 int i; 573 unsigned int usage_page; 574 unsigned int current_page; 575 576 if (!parser->local.usage_index) 577 return; 578 579 usage_page = parser->global.usage_page; 580 581 /* 582 * Concatenate usage page again only if last declared Usage Page 583 * has not been already used in previous usages concatenation 584 */ 585 for (i = parser->local.usage_index - 1; i >= 0; i--) { 586 if (parser->local.usage_size[i] > 2) 587 /* Ignore extended usages */ 588 continue; 589 590 current_page = parser->local.usage[i] >> 16; 591 if (current_page == usage_page) 592 break; 593 594 complete_usage(parser, i); 595 } 596 } 597 598 /* 599 * Process a main item. 600 */ 601 602 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) 603 { 604 __u32 data; 605 int ret; 606 607 hid_concatenate_last_usage_page(parser); 608 609 data = item_udata(item); 610 611 switch (item->tag) { 612 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 613 ret = open_collection(parser, data & 0xff); 614 break; 615 case HID_MAIN_ITEM_TAG_END_COLLECTION: 616 ret = close_collection(parser); 617 break; 618 case HID_MAIN_ITEM_TAG_INPUT: 619 ret = hid_add_field(parser, HID_INPUT_REPORT, data); 620 break; 621 case HID_MAIN_ITEM_TAG_OUTPUT: 622 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data); 623 break; 624 case HID_MAIN_ITEM_TAG_FEATURE: 625 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 626 break; 627 default: 628 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag); 629 ret = 0; 630 } 631 632 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */ 633 634 return ret; 635 } 636 637 /* 638 * Process a reserved item. 639 */ 640 641 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item) 642 { 643 dbg_hid("reserved item type, tag 0x%x\n", item->tag); 644 return 0; 645 } 646 647 /* 648 * Free a report and all registered fields. The field->usage and 649 * field->value table's are allocated behind the field, so we need 650 * only to free(field) itself. 651 */ 652 653 static void hid_free_report(struct hid_report *report) 654 { 655 unsigned n; 656 657 kfree(report->field_entries); 658 659 for (n = 0; n < report->maxfield; n++) 660 kfree(report->field[n]); 661 kfree(report); 662 } 663 664 /* 665 * Close report. This function returns the device 666 * state to the point prior to hid_open_report(). 
667 */ 668 static void hid_close_report(struct hid_device *device) 669 { 670 unsigned i, j; 671 672 for (i = 0; i < HID_REPORT_TYPES; i++) { 673 struct hid_report_enum *report_enum = device->report_enum + i; 674 675 for (j = 0; j < HID_MAX_IDS; j++) { 676 struct hid_report *report = report_enum->report_id_hash[j]; 677 if (report) 678 hid_free_report(report); 679 } 680 memset(report_enum, 0, sizeof(*report_enum)); 681 INIT_LIST_HEAD(&report_enum->report_list); 682 } 683 684 kfree(device->rdesc); 685 device->rdesc = NULL; 686 device->rsize = 0; 687 688 kfree(device->collection); 689 device->collection = NULL; 690 device->collection_size = 0; 691 device->maxcollection = 0; 692 device->maxapplication = 0; 693 694 device->status &= ~HID_STAT_PARSED; 695 } 696 697 /* 698 * Free a device structure, all reports, and all fields. 699 */ 700 701 static void hid_device_release(struct device *dev) 702 { 703 struct hid_device *hid = to_hid_device(dev); 704 705 hid_close_report(hid); 706 kfree(hid->dev_rdesc); 707 kfree(hid); 708 } 709 710 /* 711 * Fetch a report description item from the data stream. We support long 712 * items, though they are not used yet. 713 */ 714 715 static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) 716 { 717 u8 b; 718 719 if ((end - start) <= 0) 720 return NULL; 721 722 b = *start++; 723 724 item->type = (b >> 2) & 3; 725 item->tag = (b >> 4) & 15; 726 727 if (item->tag == HID_ITEM_TAG_LONG) { 728 729 item->format = HID_ITEM_FORMAT_LONG; 730 731 if ((end - start) < 2) 732 return NULL; 733 734 item->size = *start++; 735 item->tag = *start++; 736 737 if ((end - start) < item->size) 738 return NULL; 739 740 item->data.longdata = start; 741 start += item->size; 742 return start; 743 } 744 745 item->format = HID_ITEM_FORMAT_SHORT; 746 item->size = b & 3; 747 748 switch (item->size) { 749 case 0: 750 return start; 751 752 case 1: 753 if ((end - start) < 1) 754 return NULL; 755 item->data.u8 = *start++; 756 return start; 757 758 case 2: 759 if ((end - start) < 2) 760 return NULL; 761 item->data.u16 = get_unaligned_le16(start); 762 start = (__u8 *)((__le16 *)start + 1); 763 return start; 764 765 case 3: 766 item->size++; 767 if ((end - start) < 4) 768 return NULL; 769 item->data.u32 = get_unaligned_le32(start); 770 start = (__u8 *)((__le32 *)start + 1); 771 return start; 772 } 773 774 return NULL; 775 } 776 777 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage) 778 { 779 struct hid_device *hid = parser->device; 780 781 if (usage == HID_DG_CONTACTID) 782 hid->group = HID_GROUP_MULTITOUCH; 783 } 784 785 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) 786 { 787 if (usage == 0xff0000c5 && parser->global.report_count == 256 && 788 parser->global.report_size == 8) 789 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 790 791 if (usage == 0xff0000c6 && parser->global.report_count == 1 && 792 parser->global.report_size == 8) 793 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 794 } 795 796 static void hid_scan_collection(struct hid_parser *parser, unsigned type) 797 { 798 struct hid_device *hid = parser->device; 799 int i; 800 801 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && 802 (type == HID_COLLECTION_PHYSICAL || 803 type == HID_COLLECTION_APPLICATION)) 804 hid->group = HID_GROUP_SENSOR_HUB; 805 806 if (hid->vendor == USB_VENDOR_ID_MICROSOFT && 807 hid->product == USB_DEVICE_ID_MS_POWER_COVER && 808 hid->group == HID_GROUP_MULTITOUCH) 809 hid->group = HID_GROUP_GENERIC; 810 811 if ((parser->global.usage_page << 16) == 
HID_UP_GENDESK) 812 for (i = 0; i < parser->local.usage_index; i++) 813 if (parser->local.usage[i] == HID_GD_POINTER) 814 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER; 815 816 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR) 817 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC; 818 819 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR) 820 for (i = 0; i < parser->local.usage_index; i++) 821 if (parser->local.usage[i] == 822 (HID_UP_GOOGLEVENDOR | 0x0001)) 823 parser->device->group = 824 HID_GROUP_VIVALDI; 825 } 826 827 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) 828 { 829 __u32 data; 830 int i; 831 832 hid_concatenate_last_usage_page(parser); 833 834 data = item_udata(item); 835 836 switch (item->tag) { 837 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 838 hid_scan_collection(parser, data & 0xff); 839 break; 840 case HID_MAIN_ITEM_TAG_END_COLLECTION: 841 break; 842 case HID_MAIN_ITEM_TAG_INPUT: 843 /* ignore constant inputs, they will be ignored by hid-input */ 844 if (data & HID_MAIN_ITEM_CONSTANT) 845 break; 846 for (i = 0; i < parser->local.usage_index; i++) 847 hid_scan_input_usage(parser, parser->local.usage[i]); 848 break; 849 case HID_MAIN_ITEM_TAG_OUTPUT: 850 break; 851 case HID_MAIN_ITEM_TAG_FEATURE: 852 for (i = 0; i < parser->local.usage_index; i++) 853 hid_scan_feature_usage(parser, parser->local.usage[i]); 854 break; 855 } 856 857 /* Reset the local parser environment */ 858 memset(&parser->local, 0, sizeof(parser->local)); 859 860 return 0; 861 } 862 863 /* 864 * Scan a report descriptor before the device is added to the bus. 865 * Sets device groups and other properties that determine what driver 866 * to load. 867 */ 868 static int hid_scan_report(struct hid_device *hid) 869 { 870 struct hid_parser *parser; 871 struct hid_item item; 872 __u8 *start = hid->dev_rdesc; 873 __u8 *end = start + hid->dev_rsize; 874 static int (*dispatch_type[])(struct hid_parser *parser, 875 struct hid_item *item) = { 876 hid_scan_main, 877 hid_parser_global, 878 hid_parser_local, 879 hid_parser_reserved 880 }; 881 882 parser = vzalloc(sizeof(struct hid_parser)); 883 if (!parser) 884 return -ENOMEM; 885 886 parser->device = hid; 887 hid->group = HID_GROUP_GENERIC; 888 889 /* 890 * The parsing is simpler than the one in hid_open_report() as we should 891 * be robust against hid errors. Those errors will be raised by 892 * hid_open_report() anyway. 893 */ 894 while ((start = fetch_item(start, end, &item)) != NULL) 895 dispatch_type[item.type](parser, &item); 896 897 /* 898 * Handle special flags set during scanning. 899 */ 900 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) && 901 (hid->group == HID_GROUP_MULTITOUCH)) 902 hid->group = HID_GROUP_MULTITOUCH_WIN_8; 903 904 /* 905 * Vendor specific handlings 906 */ 907 switch (hid->vendor) { 908 case USB_VENDOR_ID_WACOM: 909 hid->group = HID_GROUP_WACOM; 910 break; 911 case USB_VENDOR_ID_SYNAPTICS: 912 if (hid->group == HID_GROUP_GENERIC) 913 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 914 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 915 /* 916 * hid-rmi should take care of them, 917 * not hid-generic 918 */ 919 hid->group = HID_GROUP_RMI; 920 break; 921 } 922 923 kfree(parser->collection_stack); 924 vfree(parser); 925 return 0; 926 } 927 928 /** 929 * hid_parse_report - parse device report 930 * 931 * @hid: hid device 932 * @start: report start 933 * @size: report size 934 * 935 * Allocate the device report as read by the bus driver. 
This function should 936 * only be called from parse() in ll drivers. 937 */ 938 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size) 939 { 940 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); 941 if (!hid->dev_rdesc) 942 return -ENOMEM; 943 hid->dev_rsize = size; 944 return 0; 945 } 946 EXPORT_SYMBOL_GPL(hid_parse_report); 947 948 static const char * const hid_report_names[] = { 949 "HID_INPUT_REPORT", 950 "HID_OUTPUT_REPORT", 951 "HID_FEATURE_REPORT", 952 }; 953 /** 954 * hid_validate_values - validate existing device report's value indexes 955 * 956 * @hid: hid device 957 * @type: which report type to examine 958 * @id: which report ID to examine (0 for first) 959 * @field_index: which report field to examine 960 * @report_counts: expected number of values 961 * 962 * Validate the number of values in a given field of a given report, after 963 * parsing. 964 */ 965 struct hid_report *hid_validate_values(struct hid_device *hid, 966 enum hid_report_type type, unsigned int id, 967 unsigned int field_index, 968 unsigned int report_counts) 969 { 970 struct hid_report *report; 971 972 if (type > HID_FEATURE_REPORT) { 973 hid_err(hid, "invalid HID report type %u\n", type); 974 return NULL; 975 } 976 977 if (id >= HID_MAX_IDS) { 978 hid_err(hid, "invalid HID report id %u\n", id); 979 return NULL; 980 } 981 982 /* 983 * Explicitly not using hid_get_report() here since it depends on 984 * ->numbered being checked, which may not always be the case when 985 * drivers go to access report values. 986 */ 987 if (id == 0) { 988 /* 989 * Validating on id 0 means we should examine the first 990 * report in the list. 991 */ 992 report = list_first_entry_or_null( 993 &hid->report_enum[type].report_list, 994 struct hid_report, list); 995 } else { 996 report = hid->report_enum[type].report_id_hash[id]; 997 } 998 if (!report) { 999 hid_err(hid, "missing %s %u\n", hid_report_names[type], id); 1000 return NULL; 1001 } 1002 if (report->maxfield <= field_index) { 1003 hid_err(hid, "not enough fields in %s %u\n", 1004 hid_report_names[type], id); 1005 return NULL; 1006 } 1007 if (report->field[field_index]->report_count < report_counts) { 1008 hid_err(hid, "not enough values in %s %u field %u\n", 1009 hid_report_names[type], id, field_index); 1010 return NULL; 1011 } 1012 return report; 1013 } 1014 EXPORT_SYMBOL_GPL(hid_validate_values); 1015 1016 static int hid_calculate_multiplier(struct hid_device *hid, 1017 struct hid_field *multiplier) 1018 { 1019 int m; 1020 __s32 v = *multiplier->value; 1021 __s32 lmin = multiplier->logical_minimum; 1022 __s32 lmax = multiplier->logical_maximum; 1023 __s32 pmin = multiplier->physical_minimum; 1024 __s32 pmax = multiplier->physical_maximum; 1025 1026 /* 1027 * "Because OS implementations will generally divide the control's 1028 * reported count by the Effective Resolution Multiplier, designers 1029 * should take care not to establish a potential Effective 1030 * Resolution Multiplier of zero." 1031 * HID Usage Table, v1.12, Section 4.3.1, p31 1032 */ 1033 if (lmax - lmin == 0) 1034 return 1; 1035 /* 1036 * Handling the unit exponent is left as an exercise to whoever 1037 * finds a device where that exponent is not 0. 
1038 */ 1039 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin); 1040 if (unlikely(multiplier->unit_exponent != 0)) { 1041 hid_warn(hid, 1042 "unsupported Resolution Multiplier unit exponent %d\n", 1043 multiplier->unit_exponent); 1044 } 1045 1046 /* There are no devices with an effective multiplier > 255 */ 1047 if (unlikely(m == 0 || m > 255 || m < -255)) { 1048 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m); 1049 m = 1; 1050 } 1051 1052 return m; 1053 } 1054 1055 static void hid_apply_multiplier_to_field(struct hid_device *hid, 1056 struct hid_field *field, 1057 struct hid_collection *multiplier_collection, 1058 int effective_multiplier) 1059 { 1060 struct hid_collection *collection; 1061 struct hid_usage *usage; 1062 int i; 1063 1064 /* 1065 * If multiplier_collection is NULL, the multiplier applies 1066 * to all fields in the report. 1067 * Otherwise, it is the Logical Collection the multiplier applies to 1068 * but our field may be in a subcollection of that collection. 1069 */ 1070 for (i = 0; i < field->maxusage; i++) { 1071 usage = &field->usage[i]; 1072 1073 collection = &hid->collection[usage->collection_index]; 1074 while (collection->parent_idx != -1 && 1075 collection != multiplier_collection) 1076 collection = &hid->collection[collection->parent_idx]; 1077 1078 if (collection->parent_idx != -1 || 1079 multiplier_collection == NULL) 1080 usage->resolution_multiplier = effective_multiplier; 1081 1082 } 1083 } 1084 1085 static void hid_apply_multiplier(struct hid_device *hid, 1086 struct hid_field *multiplier) 1087 { 1088 struct hid_report_enum *rep_enum; 1089 struct hid_report *rep; 1090 struct hid_field *field; 1091 struct hid_collection *multiplier_collection; 1092 int effective_multiplier; 1093 int i; 1094 1095 /* 1096 * "The Resolution Multiplier control must be contained in the same 1097 * Logical Collection as the control(s) to which it is to be applied. 1098 * If no Resolution Multiplier is defined, then the Resolution 1099 * Multiplier defaults to 1. If more than one control exists in a 1100 * Logical Collection, the Resolution Multiplier is associated with 1101 * all controls in the collection. If no Logical Collection is 1102 * defined, the Resolution Multiplier is associated with all 1103 * controls in the report." 1104 * HID Usage Table, v1.12, Section 4.3.1, p30 1105 * 1106 * Thus, search from the current collection upwards until we find a 1107 * logical collection. Then search all fields for that same parent 1108 * collection. Those are the fields the multiplier applies to. 1109 * 1110 * If we have more than one multiplier, it will overwrite the 1111 * applicable fields later. 
1112 */ 1113 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1114 while (multiplier_collection->parent_idx != -1 && 1115 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1116 multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; 1117 1118 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1119 1120 rep_enum = &hid->report_enum[HID_INPUT_REPORT]; 1121 list_for_each_entry(rep, &rep_enum->report_list, list) { 1122 for (i = 0; i < rep->maxfield; i++) { 1123 field = rep->field[i]; 1124 hid_apply_multiplier_to_field(hid, field, 1125 multiplier_collection, 1126 effective_multiplier); 1127 } 1128 } 1129 } 1130 1131 /* 1132 * hid_setup_resolution_multiplier - set up all resolution multipliers 1133 * 1134 * @device: hid device 1135 * 1136 * Search for all Resolution Multiplier Feature Reports and apply their 1137 * value to all matching Input items. This only updates the internal struct 1138 * fields. 1139 * 1140 * The Resolution Multiplier is applied by the hardware. If the multiplier 1141 * is anything other than 1, the hardware will send pre-multiplied events 1142 * so that the same physical interaction generates an accumulated 1143 * accumulated_value = value * * multiplier 1144 * This may be achieved by sending 1145 * - "value * multiplier" for each event, or 1146 * - "value" but "multiplier" times as frequently, or 1147 * - a combination of the above 1148 * The only guarantee is that the same physical interaction always generates 1149 * an accumulated 'value * multiplier'. 1150 * 1151 * This function must be called before any event processing and after 1152 * any SetRequest to the Resolution Multiplier. 1153 */ 1154 void hid_setup_resolution_multiplier(struct hid_device *hid) 1155 { 1156 struct hid_report_enum *rep_enum; 1157 struct hid_report *rep; 1158 struct hid_usage *usage; 1159 int i, j; 1160 1161 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1162 list_for_each_entry(rep, &rep_enum->report_list, list) { 1163 for (i = 0; i < rep->maxfield; i++) { 1164 /* Ignore if report count is out of bounds. */ 1165 if (rep->field[i]->report_count < 1) 1166 continue; 1167 1168 for (j = 0; j < rep->field[i]->maxusage; j++) { 1169 usage = &rep->field[i]->usage[j]; 1170 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) 1171 hid_apply_multiplier(hid, 1172 rep->field[i]); 1173 } 1174 } 1175 } 1176 } 1177 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier); 1178 1179 /** 1180 * hid_open_report - open a driver-specific device report 1181 * 1182 * @device: hid device 1183 * 1184 * Parse a report description into a hid_device structure. Reports are 1185 * enumerated, fields are attached to these reports. 1186 * 0 returned on success, otherwise nonzero error value. 1187 * 1188 * This function (or the equivalent hid_parse() macro) should only be 1189 * called from probe() in drivers, before starting the device. 
1190 */ 1191 int hid_open_report(struct hid_device *device) 1192 { 1193 struct hid_parser *parser; 1194 struct hid_item item; 1195 unsigned int size; 1196 __u8 *start; 1197 __u8 *buf; 1198 __u8 *end; 1199 __u8 *next; 1200 int ret; 1201 int i; 1202 static int (*dispatch_type[])(struct hid_parser *parser, 1203 struct hid_item *item) = { 1204 hid_parser_main, 1205 hid_parser_global, 1206 hid_parser_local, 1207 hid_parser_reserved 1208 }; 1209 1210 if (WARN_ON(device->status & HID_STAT_PARSED)) 1211 return -EBUSY; 1212 1213 start = device->dev_rdesc; 1214 if (WARN_ON(!start)) 1215 return -ENODEV; 1216 size = device->dev_rsize; 1217 1218 /* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */ 1219 buf = call_hid_bpf_rdesc_fixup(device, start, &size); 1220 if (buf == NULL) 1221 return -ENOMEM; 1222 1223 if (device->driver->report_fixup) 1224 start = device->driver->report_fixup(device, buf, &size); 1225 else 1226 start = buf; 1227 1228 start = kmemdup(start, size, GFP_KERNEL); 1229 kfree(buf); 1230 if (start == NULL) 1231 return -ENOMEM; 1232 1233 device->rdesc = start; 1234 device->rsize = size; 1235 1236 parser = vzalloc(sizeof(struct hid_parser)); 1237 if (!parser) { 1238 ret = -ENOMEM; 1239 goto alloc_err; 1240 } 1241 1242 parser->device = device; 1243 1244 end = start + size; 1245 1246 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS, 1247 sizeof(struct hid_collection), GFP_KERNEL); 1248 if (!device->collection) { 1249 ret = -ENOMEM; 1250 goto err; 1251 } 1252 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; 1253 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++) 1254 device->collection[i].parent_idx = -1; 1255 1256 ret = -EINVAL; 1257 while ((next = fetch_item(start, end, &item)) != NULL) { 1258 start = next; 1259 1260 if (item.format != HID_ITEM_FORMAT_SHORT) { 1261 hid_err(device, "unexpected long global item\n"); 1262 goto err; 1263 } 1264 1265 if (dispatch_type[item.type](parser, &item)) { 1266 hid_err(device, "item %u %u %u %u parsing failed\n", 1267 item.format, (unsigned)item.size, 1268 (unsigned)item.type, (unsigned)item.tag); 1269 goto err; 1270 } 1271 1272 if (start == end) { 1273 if (parser->collection_stack_ptr) { 1274 hid_err(device, "unbalanced collection at end of report description\n"); 1275 goto err; 1276 } 1277 if (parser->local.delimiter_depth) { 1278 hid_err(device, "unbalanced delimiter at end of report description\n"); 1279 goto err; 1280 } 1281 1282 /* 1283 * fetch initial values in case the device's 1284 * default multiplier isn't the recommended 1 1285 */ 1286 hid_setup_resolution_multiplier(device); 1287 1288 kfree(parser->collection_stack); 1289 vfree(parser); 1290 device->status |= HID_STAT_PARSED; 1291 1292 return 0; 1293 } 1294 } 1295 1296 hid_err(device, "item fetching failed at offset %u/%u\n", 1297 size - (unsigned int)(end - start), size); 1298 err: 1299 kfree(parser->collection_stack); 1300 alloc_err: 1301 vfree(parser); 1302 hid_close_report(device); 1303 return ret; 1304 } 1305 EXPORT_SYMBOL_GPL(hid_open_report); 1306 1307 /* 1308 * Convert a signed n-bit integer to signed 32-bit integer. Common 1309 * cases are done through the compiler, the screwed things has to be 1310 * done by hand. 1311 */ 1312 1313 static s32 snto32(__u32 value, unsigned n) 1314 { 1315 if (!value || !n) 1316 return 0; 1317 1318 if (n > 32) 1319 n = 32; 1320 1321 switch (n) { 1322 case 8: return ((__s8)value); 1323 case 16: return ((__s16)value); 1324 case 32: return ((__s32)value); 1325 } 1326 return value & (1 << (n - 1)) ? 
value | (~0U << n) : value; 1327 } 1328 1329 s32 hid_snto32(__u32 value, unsigned n) 1330 { 1331 return snto32(value, n); 1332 } 1333 EXPORT_SYMBOL_GPL(hid_snto32); 1334 1335 /* 1336 * Convert a signed 32-bit integer to a signed n-bit integer. 1337 */ 1338 1339 static u32 s32ton(__s32 value, unsigned n) 1340 { 1341 s32 a = value >> (n - 1); 1342 if (a && a != -1) 1343 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1; 1344 return value & ((1 << n) - 1); 1345 } 1346 1347 /* 1348 * Extract/implement a data field from/to a little endian report (bit array). 1349 * 1350 * Code sort-of follows HID spec: 1351 * http://www.usb.org/developers/hidpage/HID1_11.pdf 1352 * 1353 * While the USB HID spec allows unlimited length bit fields in "report 1354 * descriptors", most devices never use more than 16 bits. 1355 * One model of UPS is claimed to report "LINEV" as a 32-bit field. 1356 * Search linux-kernel and linux-usb-devel archives for "hid-core extract". 1357 */ 1358 1359 static u32 __extract(u8 *report, unsigned offset, int n) 1360 { 1361 unsigned int idx = offset / 8; 1362 unsigned int bit_nr = 0; 1363 unsigned int bit_shift = offset % 8; 1364 int bits_to_copy = 8 - bit_shift; 1365 u32 value = 0; 1366 u32 mask = n < 32 ? (1U << n) - 1 : ~0U; 1367 1368 while (n > 0) { 1369 value |= ((u32)report[idx] >> bit_shift) << bit_nr; 1370 n -= bits_to_copy; 1371 bit_nr += bits_to_copy; 1372 bits_to_copy = 8; 1373 bit_shift = 0; 1374 idx++; 1375 } 1376 1377 return value & mask; 1378 } 1379 1380 u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1381 unsigned offset, unsigned n) 1382 { 1383 if (n > 32) { 1384 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n", 1385 __func__, n, current->comm); 1386 n = 32; 1387 } 1388 1389 return __extract(report, offset, n); 1390 } 1391 EXPORT_SYMBOL_GPL(hid_field_extract); 1392 1393 /* 1394 * "implement" : set bits in a little endian bit stream. 1395 * Same concepts as "extract" (see comments above). 1396 * The data mangled in the bit stream remains in little endian 1397 * order the whole time. It make more sense to talk about 1398 * endianness of register values by considering a register 1399 * a "cached" copy of the little endian bit stream. 1400 */ 1401 1402 static void __implement(u8 *report, unsigned offset, int n, u32 value) 1403 { 1404 unsigned int idx = offset / 8; 1405 unsigned int bit_shift = offset % 8; 1406 int bits_to_set = 8 - bit_shift; 1407 1408 while (n - bits_to_set >= 0) { 1409 report[idx] &= ~(0xff << bit_shift); 1410 report[idx] |= value << bit_shift; 1411 value >>= bits_to_set; 1412 n -= bits_to_set; 1413 bits_to_set = 8; 1414 bit_shift = 0; 1415 idx++; 1416 } 1417 1418 /* last nibble */ 1419 if (n) { 1420 u8 bit_mask = ((1U << n) - 1); 1421 report[idx] &= ~(bit_mask << bit_shift); 1422 report[idx] |= value << bit_shift; 1423 } 1424 } 1425 1426 static void implement(const struct hid_device *hid, u8 *report, 1427 unsigned offset, unsigned n, u32 value) 1428 { 1429 if (unlikely(n > 32)) { 1430 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n", 1431 __func__, n, current->comm); 1432 n = 32; 1433 } else if (n < 32) { 1434 u32 m = (1U << n) - 1; 1435 1436 if (unlikely(value > m)) { 1437 hid_warn(hid, 1438 "%s() called with too large value %d (n: %d)! (%s)\n", 1439 __func__, value, n, current->comm); 1440 WARN_ON(1); 1441 value &= m; 1442 } 1443 } 1444 1445 __implement(report, offset, n, value); 1446 } 1447 1448 /* 1449 * Search an array for a value. 
1450 */ 1451 1452 static int search(__s32 *array, __s32 value, unsigned n) 1453 { 1454 while (n--) { 1455 if (*array++ == value) 1456 return 0; 1457 } 1458 return -1; 1459 } 1460 1461 /** 1462 * hid_match_report - check if driver's raw_event should be called 1463 * 1464 * @hid: hid device 1465 * @report: hid report to match against 1466 * 1467 * compare hid->driver->report_table->report_type to report->type 1468 */ 1469 static int hid_match_report(struct hid_device *hid, struct hid_report *report) 1470 { 1471 const struct hid_report_id *id = hid->driver->report_table; 1472 1473 if (!id) /* NULL means all */ 1474 return 1; 1475 1476 for (; id->report_type != HID_TERMINATOR; id++) 1477 if (id->report_type == HID_ANY_ID || 1478 id->report_type == report->type) 1479 return 1; 1480 return 0; 1481 } 1482 1483 /** 1484 * hid_match_usage - check if driver's event should be called 1485 * 1486 * @hid: hid device 1487 * @usage: usage to match against 1488 * 1489 * compare hid->driver->usage_table->usage_{type,code} to 1490 * usage->usage_{type,code} 1491 */ 1492 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage) 1493 { 1494 const struct hid_usage_id *id = hid->driver->usage_table; 1495 1496 if (!id) /* NULL means all */ 1497 return 1; 1498 1499 for (; id->usage_type != HID_ANY_ID - 1; id++) 1500 if ((id->usage_hid == HID_ANY_ID || 1501 id->usage_hid == usage->hid) && 1502 (id->usage_type == HID_ANY_ID || 1503 id->usage_type == usage->type) && 1504 (id->usage_code == HID_ANY_ID || 1505 id->usage_code == usage->code)) 1506 return 1; 1507 return 0; 1508 } 1509 1510 static void hid_process_event(struct hid_device *hid, struct hid_field *field, 1511 struct hid_usage *usage, __s32 value, int interrupt) 1512 { 1513 struct hid_driver *hdrv = hid->driver; 1514 int ret; 1515 1516 if (!list_empty(&hid->debug_list)) 1517 hid_dump_input(hid, usage, value); 1518 1519 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1520 ret = hdrv->event(hid, field, usage, value); 1521 if (ret != 0) { 1522 if (ret < 0) 1523 hid_err(hid, "%s's event failed with %d\n", 1524 hdrv->name, ret); 1525 return; 1526 } 1527 } 1528 1529 if (hid->claimed & HID_CLAIMED_INPUT) 1530 hidinput_hid_event(hid, field, usage, value); 1531 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event) 1532 hid->hiddev_hid_event(hid, field, usage, value); 1533 } 1534 1535 /* 1536 * Checks if the given value is valid within this field 1537 */ 1538 static inline int hid_array_value_is_valid(struct hid_field *field, 1539 __s32 value) 1540 { 1541 __s32 min = field->logical_minimum; 1542 1543 /* 1544 * Value needs to be between logical min and max, and 1545 * (value - min) is used as an index in the usage array. 1546 * This array is of size field->maxusage 1547 */ 1548 return value >= min && 1549 value <= field->logical_maximum && 1550 value - min < field->maxusage; 1551 } 1552 1553 /* 1554 * Fetch the field from the data. The field content is stored for next 1555 * report processing (we do differential reporting to the layer). 
1556 */ 1557 static void hid_input_fetch_field(struct hid_device *hid, 1558 struct hid_field *field, 1559 __u8 *data) 1560 { 1561 unsigned n; 1562 unsigned count = field->report_count; 1563 unsigned offset = field->report_offset; 1564 unsigned size = field->report_size; 1565 __s32 min = field->logical_minimum; 1566 __s32 *value; 1567 1568 value = field->new_value; 1569 memset(value, 0, count * sizeof(__s32)); 1570 field->ignored = false; 1571 1572 for (n = 0; n < count; n++) { 1573 1574 value[n] = min < 0 ? 1575 snto32(hid_field_extract(hid, data, offset + n * size, 1576 size), size) : 1577 hid_field_extract(hid, data, offset + n * size, size); 1578 1579 /* Ignore report if ErrorRollOver */ 1580 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && 1581 hid_array_value_is_valid(field, value[n]) && 1582 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) { 1583 field->ignored = true; 1584 return; 1585 } 1586 } 1587 } 1588 1589 /* 1590 * Process a received variable field. 1591 */ 1592 1593 static void hid_input_var_field(struct hid_device *hid, 1594 struct hid_field *field, 1595 int interrupt) 1596 { 1597 unsigned int count = field->report_count; 1598 __s32 *value = field->new_value; 1599 unsigned int n; 1600 1601 for (n = 0; n < count; n++) 1602 hid_process_event(hid, 1603 field, 1604 &field->usage[n], 1605 value[n], 1606 interrupt); 1607 1608 memcpy(field->value, value, count * sizeof(__s32)); 1609 } 1610 1611 /* 1612 * Process a received array field. The field content is stored for 1613 * next report processing (we do differential reporting to the layer). 1614 */ 1615 1616 static void hid_input_array_field(struct hid_device *hid, 1617 struct hid_field *field, 1618 int interrupt) 1619 { 1620 unsigned int n; 1621 unsigned int count = field->report_count; 1622 __s32 min = field->logical_minimum; 1623 __s32 *value; 1624 1625 value = field->new_value; 1626 1627 /* ErrorRollOver */ 1628 if (field->ignored) 1629 return; 1630 1631 for (n = 0; n < count; n++) { 1632 if (hid_array_value_is_valid(field, field->value[n]) && 1633 search(value, field->value[n], count)) 1634 hid_process_event(hid, 1635 field, 1636 &field->usage[field->value[n] - min], 1637 0, 1638 interrupt); 1639 1640 if (hid_array_value_is_valid(field, value[n]) && 1641 search(field->value, value[n], count)) 1642 hid_process_event(hid, 1643 field, 1644 &field->usage[value[n] - min], 1645 1, 1646 interrupt); 1647 } 1648 1649 memcpy(field->value, value, count * sizeof(__s32)); 1650 } 1651 1652 /* 1653 * Analyse a received report, and fetch the data from it. The field 1654 * content is stored for next report processing (we do differential 1655 * reporting to the layer). 
1656 */ 1657 static void hid_process_report(struct hid_device *hid, 1658 struct hid_report *report, 1659 __u8 *data, 1660 int interrupt) 1661 { 1662 unsigned int a; 1663 struct hid_field_entry *entry; 1664 struct hid_field *field; 1665 1666 /* first retrieve all incoming values in data */ 1667 for (a = 0; a < report->maxfield; a++) 1668 hid_input_fetch_field(hid, report->field[a], data); 1669 1670 if (!list_empty(&report->field_entry_list)) { 1671 /* INPUT_REPORT, we have a priority list of fields */ 1672 list_for_each_entry(entry, 1673 &report->field_entry_list, 1674 list) { 1675 field = entry->field; 1676 1677 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1678 hid_process_event(hid, 1679 field, 1680 &field->usage[entry->index], 1681 field->new_value[entry->index], 1682 interrupt); 1683 else 1684 hid_input_array_field(hid, field, interrupt); 1685 } 1686 1687 /* we need to do the memcpy at the end for var items */ 1688 for (a = 0; a < report->maxfield; a++) { 1689 field = report->field[a]; 1690 1691 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1692 memcpy(field->value, field->new_value, 1693 field->report_count * sizeof(__s32)); 1694 } 1695 } else { 1696 /* FEATURE_REPORT, regular processing */ 1697 for (a = 0; a < report->maxfield; a++) { 1698 field = report->field[a]; 1699 1700 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1701 hid_input_var_field(hid, field, interrupt); 1702 else 1703 hid_input_array_field(hid, field, interrupt); 1704 } 1705 } 1706 } 1707 1708 /* 1709 * Insert a given usage_index in a field in the list 1710 * of processed usages in the report. 1711 * 1712 * The elements of lower priority score are processed 1713 * first. 1714 */ 1715 static void __hid_insert_field_entry(struct hid_device *hid, 1716 struct hid_report *report, 1717 struct hid_field_entry *entry, 1718 struct hid_field *field, 1719 unsigned int usage_index) 1720 { 1721 struct hid_field_entry *next; 1722 1723 entry->field = field; 1724 entry->index = usage_index; 1725 entry->priority = field->usages_priorities[usage_index]; 1726 1727 /* insert the element at the correct position */ 1728 list_for_each_entry(next, 1729 &report->field_entry_list, 1730 list) { 1731 /* 1732 * the priority of our element is strictly higher 1733 * than the next one, insert it before 1734 */ 1735 if (entry->priority > next->priority) { 1736 list_add_tail(&entry->list, &next->list); 1737 return; 1738 } 1739 } 1740 1741 /* lowest priority score: insert at the end */ 1742 list_add_tail(&entry->list, &report->field_entry_list); 1743 } 1744 1745 static void hid_report_process_ordering(struct hid_device *hid, 1746 struct hid_report *report) 1747 { 1748 struct hid_field *field; 1749 struct hid_field_entry *entries; 1750 unsigned int a, u, usages; 1751 unsigned int count = 0; 1752 1753 /* count the number of individual fields in the report */ 1754 for (a = 0; a < report->maxfield; a++) { 1755 field = report->field[a]; 1756 1757 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1758 count += field->report_count; 1759 else 1760 count++; 1761 } 1762 1763 /* allocate the memory to process the fields */ 1764 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); 1765 if (!entries) 1766 return; 1767 1768 report->field_entries = entries; 1769 1770 /* 1771 * walk through all fields in the report and 1772 * store them by priority order in report->field_entry_list 1773 * 1774 * - Var elements are individualized (field + usage_index) 1775 * - Arrays are taken as one, we can not chose an order for them 1776 */ 1777 usages = 0; 1778 for (a = 0; a < report->maxfield; 
a++) { 1779 field = report->field[a]; 1780 1781 if (field->flags & HID_MAIN_ITEM_VARIABLE) { 1782 for (u = 0; u < field->report_count; u++) { 1783 __hid_insert_field_entry(hid, report, 1784 &entries[usages], 1785 field, u); 1786 usages++; 1787 } 1788 } else { 1789 __hid_insert_field_entry(hid, report, &entries[usages], 1790 field, 0); 1791 usages++; 1792 } 1793 } 1794 } 1795 1796 static void hid_process_ordering(struct hid_device *hid) 1797 { 1798 struct hid_report *report; 1799 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT]; 1800 1801 list_for_each_entry(report, &report_enum->report_list, list) 1802 hid_report_process_ordering(hid, report); 1803 } 1804 1805 /* 1806 * Output the field into the report. 1807 */ 1808 1809 static void hid_output_field(const struct hid_device *hid, 1810 struct hid_field *field, __u8 *data) 1811 { 1812 unsigned count = field->report_count; 1813 unsigned offset = field->report_offset; 1814 unsigned size = field->report_size; 1815 unsigned n; 1816 1817 for (n = 0; n < count; n++) { 1818 if (field->logical_minimum < 0) /* signed values */ 1819 implement(hid, data, offset + n * size, size, 1820 s32ton(field->value[n], size)); 1821 else /* unsigned values */ 1822 implement(hid, data, offset + n * size, size, 1823 field->value[n]); 1824 } 1825 } 1826 1827 /* 1828 * Compute the size of a report. 1829 */ 1830 static size_t hid_compute_report_size(struct hid_report *report) 1831 { 1832 if (report->size) 1833 return ((report->size - 1) >> 3) + 1; 1834 1835 return 0; 1836 } 1837 1838 /* 1839 * Create a report. 'data' has to be allocated using 1840 * hid_alloc_report_buf() so that it has proper size. 1841 */ 1842 1843 void hid_output_report(struct hid_report *report, __u8 *data) 1844 { 1845 unsigned n; 1846 1847 if (report->id > 0) 1848 *data++ = report->id; 1849 1850 memset(data, 0, hid_compute_report_size(report)); 1851 for (n = 0; n < report->maxfield; n++) 1852 hid_output_field(report->device, report->field[n], data); 1853 } 1854 EXPORT_SYMBOL_GPL(hid_output_report); 1855 1856 /* 1857 * Allocator for buffer that is going to be passed to hid_output_report() 1858 */ 1859 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) 1860 { 1861 /* 1862 * 7 extra bytes are necessary to achieve proper functionality 1863 * of implement() working on 8 byte chunks 1864 */ 1865 1866 u32 len = hid_report_len(report) + 7; 1867 1868 return kmalloc(len, flags); 1869 } 1870 EXPORT_SYMBOL_GPL(hid_alloc_report_buf); 1871 1872 /* 1873 * Set a field value. The report this field belongs to has to be 1874 * created and transferred to the device, to set this value in the 1875 * device. 
1876 */ 1877 1878 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) 1879 { 1880 unsigned size; 1881 1882 if (!field) 1883 return -1; 1884 1885 size = field->report_size; 1886 1887 hid_dump_input(field->report->device, field->usage + offset, value); 1888 1889 if (offset >= field->report_count) { 1890 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n", 1891 offset, field->report_count); 1892 return -1; 1893 } 1894 if (field->logical_minimum < 0) { 1895 if (value != snto32(s32ton(value, size), size)) { 1896 hid_err(field->report->device, "value %d is out of range\n", value); 1897 return -1; 1898 } 1899 } 1900 field->value[offset] = value; 1901 return 0; 1902 } 1903 EXPORT_SYMBOL_GPL(hid_set_field); 1904 1905 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, 1906 const u8 *data) 1907 { 1908 struct hid_report *report; 1909 unsigned int n = 0; /* Normally report number is 0 */ 1910 1911 /* Device uses numbered reports, data[0] is report number */ 1912 if (report_enum->numbered) 1913 n = *data; 1914 1915 report = report_enum->report_id_hash[n]; 1916 if (report == NULL) 1917 dbg_hid("undefined report_id %u received\n", n); 1918 1919 return report; 1920 } 1921 1922 /* 1923 * Implement a generic .request() callback, using .raw_request() 1924 * DO NOT USE in hid drivers directly, but through hid_hw_request instead. 1925 */ 1926 int __hid_request(struct hid_device *hid, struct hid_report *report, 1927 enum hid_class_request reqtype) 1928 { 1929 char *buf; 1930 int ret; 1931 u32 len; 1932 1933 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1934 if (!buf) 1935 return -ENOMEM; 1936 1937 len = hid_report_len(report); 1938 1939 if (reqtype == HID_REQ_SET_REPORT) 1940 hid_output_report(report, buf); 1941 1942 ret = hid->ll_driver->raw_request(hid, report->id, buf, len, 1943 report->type, reqtype); 1944 if (ret < 0) { 1945 dbg_hid("unable to complete request: %d\n", ret); 1946 goto out; 1947 } 1948 1949 if (reqtype == HID_REQ_GET_REPORT) 1950 hid_input_report(hid, report->type, buf, ret, 0); 1951 1952 ret = 0; 1953 1954 out: 1955 kfree(buf); 1956 return ret; 1957 } 1958 EXPORT_SYMBOL_GPL(__hid_request); 1959 1960 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 1961 int interrupt) 1962 { 1963 struct hid_report_enum *report_enum = hid->report_enum + type; 1964 struct hid_report *report; 1965 struct hid_driver *hdrv; 1966 u32 rsize, csize = size; 1967 u8 *cdata = data; 1968 int ret = 0; 1969 1970 report = hid_get_report(report_enum, data); 1971 if (!report) 1972 goto out; 1973 1974 if (report_enum->numbered) { 1975 cdata++; 1976 csize--; 1977 } 1978 1979 rsize = hid_compute_report_size(report); 1980 1981 if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) 1982 rsize = HID_MAX_BUFFER_SIZE - 1; 1983 else if (rsize > HID_MAX_BUFFER_SIZE) 1984 rsize = HID_MAX_BUFFER_SIZE; 1985 1986 if (csize < rsize) { 1987 dbg_hid("report %d is too short, (%d < %d)\n", report->id, 1988 csize, rsize); 1989 memset(cdata + csize, 0, rsize - csize); 1990 } 1991 1992 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 1993 hid->hiddev_report_event(hid, report); 1994 if (hid->claimed & HID_CLAIMED_HIDRAW) { 1995 ret = hidraw_report_event(hid, data, size); 1996 if (ret) 1997 goto out; 1998 } 1999 2000 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 2001 hid_process_report(hid, report, cdata, interrupt); 2002 hdrv = hid->driver; 2003 if (hdrv && hdrv->report) 2004 hdrv->report(hid, 
report); 2005 } 2006 2007 if (hid->claimed & HID_CLAIMED_INPUT) 2008 hidinput_report_event(hid, report); 2009 out: 2010 return ret; 2011 } 2012 EXPORT_SYMBOL_GPL(hid_report_raw_event); 2013 2014 /** 2015 * hid_input_report - report data from lower layer (usb, bt...) 2016 * 2017 * @hid: hid device 2018 * @type: HID report type (HID_*_REPORT) 2019 * @data: report contents 2020 * @size: size of data parameter 2021 * @interrupt: distinguish between interrupt and control transfers 2022 * 2023 * This is data entry for lower layers. 2024 */ 2025 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2026 int interrupt) 2027 { 2028 struct hid_report_enum *report_enum; 2029 struct hid_driver *hdrv; 2030 struct hid_report *report; 2031 int ret = 0; 2032 2033 if (!hid) 2034 return -ENODEV; 2035 2036 if (down_trylock(&hid->driver_input_lock)) 2037 return -EBUSY; 2038 2039 if (!hid->driver) { 2040 ret = -ENODEV; 2041 goto unlock; 2042 } 2043 report_enum = hid->report_enum + type; 2044 hdrv = hid->driver; 2045 2046 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt); 2047 if (IS_ERR(data)) { 2048 ret = PTR_ERR(data); 2049 goto unlock; 2050 } 2051 2052 if (!size) { 2053 dbg_hid("empty report\n"); 2054 ret = -1; 2055 goto unlock; 2056 } 2057 2058 /* Avoid unnecessary overhead if debugfs is disabled */ 2059 if (!list_empty(&hid->debug_list)) 2060 hid_dump_report(hid, type, data, size); 2061 2062 report = hid_get_report(report_enum, data); 2063 2064 if (!report) { 2065 ret = -1; 2066 goto unlock; 2067 } 2068 2069 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { 2070 ret = hdrv->raw_event(hid, report, data, size); 2071 if (ret < 0) 2072 goto unlock; 2073 } 2074 2075 ret = hid_report_raw_event(hid, type, data, size, interrupt); 2076 2077 unlock: 2078 up(&hid->driver_input_lock); 2079 return ret; 2080 } 2081 EXPORT_SYMBOL_GPL(hid_input_report); 2082 2083 bool hid_match_one_id(const struct hid_device *hdev, 2084 const struct hid_device_id *id) 2085 { 2086 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && 2087 (id->group == HID_GROUP_ANY || id->group == hdev->group) && 2088 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && 2089 (id->product == HID_ANY_ID || id->product == hdev->product); 2090 } 2091 2092 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, 2093 const struct hid_device_id *id) 2094 { 2095 for (; id->bus; id++) 2096 if (hid_match_one_id(hdev, id)) 2097 return id; 2098 2099 return NULL; 2100 } 2101 EXPORT_SYMBOL_GPL(hid_match_id); 2102 2103 static const struct hid_device_id hid_hiddev_list[] = { 2104 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) }, 2105 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) }, 2106 { } 2107 }; 2108 2109 static bool hid_hiddev(struct hid_device *hdev) 2110 { 2111 return !!hid_match_id(hdev, hid_hiddev_list); 2112 } 2113 2114 2115 static ssize_t 2116 read_report_descriptor(struct file *filp, struct kobject *kobj, 2117 struct bin_attribute *attr, 2118 char *buf, loff_t off, size_t count) 2119 { 2120 struct device *dev = kobj_to_dev(kobj); 2121 struct hid_device *hdev = to_hid_device(dev); 2122 2123 if (off >= hdev->rsize) 2124 return 0; 2125 2126 if (off + count > hdev->rsize) 2127 count = hdev->rsize - off; 2128 2129 memcpy(buf, hdev->rdesc + off, count); 2130 2131 return count; 2132 } 2133 2134 static ssize_t 2135 show_country(struct device *dev, struct device_attribute *attr, 2136 char *buf) 2137 { 2138 struct hid_device *hdev = 
to_hid_device(dev); 2139 2140 return sprintf(buf, "%02x\n", hdev->country & 0xff); 2141 } 2142 2143 static struct bin_attribute dev_bin_attr_report_desc = { 2144 .attr = { .name = "report_descriptor", .mode = 0444 }, 2145 .read = read_report_descriptor, 2146 .size = HID_MAX_DESCRIPTOR_SIZE, 2147 }; 2148 2149 static const struct device_attribute dev_attr_country = { 2150 .attr = { .name = "country", .mode = 0444 }, 2151 .show = show_country, 2152 }; 2153 2154 int hid_connect(struct hid_device *hdev, unsigned int connect_mask) 2155 { 2156 static const char *types[] = { "Device", "Pointer", "Mouse", "Device", 2157 "Joystick", "Gamepad", "Keyboard", "Keypad", 2158 "Multi-Axis Controller" 2159 }; 2160 const char *type, *bus; 2161 char buf[64] = ""; 2162 unsigned int i; 2163 int len; 2164 int ret; 2165 2166 ret = hid_bpf_connect_device(hdev); 2167 if (ret) 2168 return ret; 2169 2170 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) 2171 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); 2172 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) 2173 connect_mask |= HID_CONNECT_HIDINPUT_FORCE; 2174 if (hdev->bus != BUS_USB) 2175 connect_mask &= ~HID_CONNECT_HIDDEV; 2176 if (hid_hiddev(hdev)) 2177 connect_mask |= HID_CONNECT_HIDDEV_FORCE; 2178 2179 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev, 2180 connect_mask & HID_CONNECT_HIDINPUT_FORCE)) 2181 hdev->claimed |= HID_CLAIMED_INPUT; 2182 2183 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect && 2184 !hdev->hiddev_connect(hdev, 2185 connect_mask & HID_CONNECT_HIDDEV_FORCE)) 2186 hdev->claimed |= HID_CLAIMED_HIDDEV; 2187 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) 2188 hdev->claimed |= HID_CLAIMED_HIDRAW; 2189 2190 if (connect_mask & HID_CONNECT_DRIVER) 2191 hdev->claimed |= HID_CLAIMED_DRIVER; 2192 2193 /* Drivers with the ->raw_event callback set are not required to connect 2194 * to any other listener. */ 2195 if (!hdev->claimed && !hdev->driver->raw_event) { 2196 hid_err(hdev, "device has no listeners, quitting\n"); 2197 return -ENODEV; 2198 } 2199 2200 hid_process_ordering(hdev); 2201 2202 if ((hdev->claimed & HID_CLAIMED_INPUT) && 2203 (connect_mask & HID_CONNECT_FF) && hdev->ff_init) 2204 hdev->ff_init(hdev); 2205 2206 len = 0; 2207 if (hdev->claimed & HID_CLAIMED_INPUT) 2208 len += sprintf(buf + len, "input"); 2209 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2210 len += sprintf(buf + len, "%shiddev%d", len ? "," : "", 2211 ((struct hiddev *)hdev->hiddev)->minor); 2212 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2213 len += sprintf(buf + len, "%shidraw%d", len ? 
"," : "", 2214 ((struct hidraw *)hdev->hidraw)->minor); 2215 2216 type = "Device"; 2217 for (i = 0; i < hdev->maxcollection; i++) { 2218 struct hid_collection *col = &hdev->collection[i]; 2219 if (col->type == HID_COLLECTION_APPLICATION && 2220 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK && 2221 (col->usage & 0xffff) < ARRAY_SIZE(types)) { 2222 type = types[col->usage & 0xffff]; 2223 break; 2224 } 2225 } 2226 2227 switch (hdev->bus) { 2228 case BUS_USB: 2229 bus = "USB"; 2230 break; 2231 case BUS_BLUETOOTH: 2232 bus = "BLUETOOTH"; 2233 break; 2234 case BUS_I2C: 2235 bus = "I2C"; 2236 break; 2237 case BUS_VIRTUAL: 2238 bus = "VIRTUAL"; 2239 break; 2240 case BUS_INTEL_ISHTP: 2241 case BUS_AMD_SFH: 2242 bus = "SENSOR HUB"; 2243 break; 2244 default: 2245 bus = "<UNKNOWN>"; 2246 } 2247 2248 ret = device_create_file(&hdev->dev, &dev_attr_country); 2249 if (ret) 2250 hid_warn(hdev, 2251 "can't create sysfs country code attribute err: %d\n", ret); 2252 2253 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", 2254 buf, bus, hdev->version >> 8, hdev->version & 0xff, 2255 type, hdev->name, hdev->phys); 2256 2257 return 0; 2258 } 2259 EXPORT_SYMBOL_GPL(hid_connect); 2260 2261 void hid_disconnect(struct hid_device *hdev) 2262 { 2263 device_remove_file(&hdev->dev, &dev_attr_country); 2264 if (hdev->claimed & HID_CLAIMED_INPUT) 2265 hidinput_disconnect(hdev); 2266 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2267 hdev->hiddev_disconnect(hdev); 2268 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2269 hidraw_disconnect(hdev); 2270 hdev->claimed = 0; 2271 2272 hid_bpf_disconnect_device(hdev); 2273 } 2274 EXPORT_SYMBOL_GPL(hid_disconnect); 2275 2276 /** 2277 * hid_hw_start - start underlying HW 2278 * @hdev: hid device 2279 * @connect_mask: which outputs to connect, see HID_CONNECT_* 2280 * 2281 * Call this in probe function *after* hid_parse. This will setup HW 2282 * buffers and start the device (if not defeirred to device open). 2283 * hid_hw_stop must be called if this was successful. 2284 */ 2285 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask) 2286 { 2287 int error; 2288 2289 error = hdev->ll_driver->start(hdev); 2290 if (error) 2291 return error; 2292 2293 if (connect_mask) { 2294 error = hid_connect(hdev, connect_mask); 2295 if (error) { 2296 hdev->ll_driver->stop(hdev); 2297 return error; 2298 } 2299 } 2300 2301 return 0; 2302 } 2303 EXPORT_SYMBOL_GPL(hid_hw_start); 2304 2305 /** 2306 * hid_hw_stop - stop underlying HW 2307 * @hdev: hid device 2308 * 2309 * This is usually called from remove function or from probe when something 2310 * failed and hid_hw_start was called already. 2311 */ 2312 void hid_hw_stop(struct hid_device *hdev) 2313 { 2314 hid_disconnect(hdev); 2315 hdev->ll_driver->stop(hdev); 2316 } 2317 EXPORT_SYMBOL_GPL(hid_hw_stop); 2318 2319 /** 2320 * hid_hw_open - signal underlying HW to start delivering events 2321 * @hdev: hid device 2322 * 2323 * Tell underlying HW to start delivering events from the device. 2324 * This function should be called sometime after successful call 2325 * to hid_hw_start(). 
2326 */
2327 int hid_hw_open(struct hid_device *hdev)
2328 {
2329 int ret;
2330
2331 ret = mutex_lock_killable(&hdev->ll_open_lock);
2332 if (ret)
2333 return ret;
2334
2335 if (!hdev->ll_open_count++) {
2336 ret = hdev->ll_driver->open(hdev);
2337 if (ret)
2338 hdev->ll_open_count--;
2339 }
2340
2341 mutex_unlock(&hdev->ll_open_lock);
2342 return ret;
2343 }
2344 EXPORT_SYMBOL_GPL(hid_hw_open);
2345
2346 /**
2347 * hid_hw_close - signal underlying HW to stop delivering events
2348 *
2349 * @hdev: hid device
2350 *
2351 * This function indicates that we are not interested in the events
2352 * from this device anymore. Delivery of events may or may not stop,
2353 * depending on the number of users still outstanding.
2354 */
2355 void hid_hw_close(struct hid_device *hdev)
2356 {
2357 mutex_lock(&hdev->ll_open_lock);
2358 if (!--hdev->ll_open_count)
2359 hdev->ll_driver->close(hdev);
2360 mutex_unlock(&hdev->ll_open_lock);
2361 }
2362 EXPORT_SYMBOL_GPL(hid_hw_close);
2363
2364 /**
2365 * hid_hw_request - send report request to device
2366 *
2367 * @hdev: hid device
2368 * @report: report to send
2369 * @reqtype: hid request type
2370 */
2371 void hid_hw_request(struct hid_device *hdev,
2372 struct hid_report *report, enum hid_class_request reqtype)
2373 {
2374 if (hdev->ll_driver->request)
2375 return hdev->ll_driver->request(hdev, report, reqtype);
2376
2377 __hid_request(hdev, report, reqtype);
2378 }
2379 EXPORT_SYMBOL_GPL(hid_hw_request);
2380
2381 /**
2382 * hid_hw_raw_request - send report request to device
2383 *
2384 * @hdev: hid device
2385 * @reportnum: report ID
2386 * @buf: in/out data to transfer
2387 * @len: length of buf
2388 * @rtype: HID report type
2389 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2390 *
2391 * Return: count of data transferred, negative if error
2392 *
2393 * Same behavior as hid_hw_request, but with raw buffers instead.
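 *
 * A minimal sketch of fetching a feature report (the report ID 0x01 and
 * the buffer size are illustrative assumptions, not values taken from this
 * file); note the buffer is kmalloc()'ed because some transports use it
 * directly for DMA, so it must not live on the stack:
 *
 *	u8 *buf = kzalloc(32, GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_raw_request(hdev, 0x01, buf, 32,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	if (ret < 0)
 *		hid_err(hdev, "failed to fetch feature report: %d\n", ret);
 *	kfree(buf);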
2394 */ 2395 int hid_hw_raw_request(struct hid_device *hdev, 2396 unsigned char reportnum, __u8 *buf, 2397 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype) 2398 { 2399 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) 2400 return -EINVAL; 2401 2402 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len, 2403 rtype, reqtype); 2404 } 2405 EXPORT_SYMBOL_GPL(hid_hw_raw_request); 2406 2407 /** 2408 * hid_hw_output_report - send output report to device 2409 * 2410 * @hdev: hid device 2411 * @buf: raw data to transfer 2412 * @len: length of buf 2413 * 2414 * Return: count of data transferred, negative if error 2415 */ 2416 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) 2417 { 2418 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) 2419 return -EINVAL; 2420 2421 if (hdev->ll_driver->output_report) 2422 return hdev->ll_driver->output_report(hdev, buf, len); 2423 2424 return -ENOSYS; 2425 } 2426 EXPORT_SYMBOL_GPL(hid_hw_output_report); 2427 2428 #ifdef CONFIG_PM 2429 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) 2430 { 2431 if (hdev->driver && hdev->driver->suspend) 2432 return hdev->driver->suspend(hdev, state); 2433 2434 return 0; 2435 } 2436 EXPORT_SYMBOL_GPL(hid_driver_suspend); 2437 2438 int hid_driver_reset_resume(struct hid_device *hdev) 2439 { 2440 if (hdev->driver && hdev->driver->reset_resume) 2441 return hdev->driver->reset_resume(hdev); 2442 2443 return 0; 2444 } 2445 EXPORT_SYMBOL_GPL(hid_driver_reset_resume); 2446 2447 int hid_driver_resume(struct hid_device *hdev) 2448 { 2449 if (hdev->driver && hdev->driver->resume) 2450 return hdev->driver->resume(hdev); 2451 2452 return 0; 2453 } 2454 EXPORT_SYMBOL_GPL(hid_driver_resume); 2455 #endif /* CONFIG_PM */ 2456 2457 struct hid_dynid { 2458 struct list_head list; 2459 struct hid_device_id id; 2460 }; 2461 2462 /** 2463 * new_id_store - add a new HID device ID to this driver and re-probe devices 2464 * @drv: target device driver 2465 * @buf: buffer for scanning device ID data 2466 * @count: input size 2467 * 2468 * Adds a new dynamic hid device ID to this driver, 2469 * and causes the driver to probe for all devices again. 2470 */ 2471 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 2472 size_t count) 2473 { 2474 struct hid_driver *hdrv = to_hid_driver(drv); 2475 struct hid_dynid *dynid; 2476 __u32 bus, vendor, product; 2477 unsigned long driver_data = 0; 2478 int ret; 2479 2480 ret = sscanf(buf, "%x %x %x %lx", 2481 &bus, &vendor, &product, &driver_data); 2482 if (ret < 3) 2483 return -EINVAL; 2484 2485 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 2486 if (!dynid) 2487 return -ENOMEM; 2488 2489 dynid->id.bus = bus; 2490 dynid->id.group = HID_GROUP_ANY; 2491 dynid->id.vendor = vendor; 2492 dynid->id.product = product; 2493 dynid->id.driver_data = driver_data; 2494 2495 spin_lock(&hdrv->dyn_lock); 2496 list_add_tail(&dynid->list, &hdrv->dyn_list); 2497 spin_unlock(&hdrv->dyn_lock); 2498 2499 ret = driver_attach(&hdrv->driver); 2500 2501 return ret ? 
: count; 2502 } 2503 static DRIVER_ATTR_WO(new_id); 2504 2505 static struct attribute *hid_drv_attrs[] = { 2506 &driver_attr_new_id.attr, 2507 NULL, 2508 }; 2509 ATTRIBUTE_GROUPS(hid_drv); 2510 2511 static void hid_free_dynids(struct hid_driver *hdrv) 2512 { 2513 struct hid_dynid *dynid, *n; 2514 2515 spin_lock(&hdrv->dyn_lock); 2516 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { 2517 list_del(&dynid->list); 2518 kfree(dynid); 2519 } 2520 spin_unlock(&hdrv->dyn_lock); 2521 } 2522 2523 const struct hid_device_id *hid_match_device(struct hid_device *hdev, 2524 struct hid_driver *hdrv) 2525 { 2526 struct hid_dynid *dynid; 2527 2528 spin_lock(&hdrv->dyn_lock); 2529 list_for_each_entry(dynid, &hdrv->dyn_list, list) { 2530 if (hid_match_one_id(hdev, &dynid->id)) { 2531 spin_unlock(&hdrv->dyn_lock); 2532 return &dynid->id; 2533 } 2534 } 2535 spin_unlock(&hdrv->dyn_lock); 2536 2537 return hid_match_id(hdev, hdrv->id_table); 2538 } 2539 EXPORT_SYMBOL_GPL(hid_match_device); 2540 2541 static int hid_bus_match(struct device *dev, struct device_driver *drv) 2542 { 2543 struct hid_driver *hdrv = to_hid_driver(drv); 2544 struct hid_device *hdev = to_hid_device(dev); 2545 2546 return hid_match_device(hdev, hdrv) != NULL; 2547 } 2548 2549 /** 2550 * hid_compare_device_paths - check if both devices share the same path 2551 * @hdev_a: hid device 2552 * @hdev_b: hid device 2553 * @separator: char to use as separator 2554 * 2555 * Check if two devices share the same path up to the last occurrence of 2556 * the separator char. Both paths must exist (i.e., zero-length paths 2557 * don't match). 2558 */ 2559 bool hid_compare_device_paths(struct hid_device *hdev_a, 2560 struct hid_device *hdev_b, char separator) 2561 { 2562 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys; 2563 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys; 2564 2565 if (n1 != n2 || n1 <= 0 || n2 <= 0) 2566 return false; 2567 2568 return !strncmp(hdev_a->phys, hdev_b->phys, n1); 2569 } 2570 EXPORT_SYMBOL_GPL(hid_compare_device_paths); 2571 2572 static int hid_device_probe(struct device *dev) 2573 { 2574 struct hid_driver *hdrv = to_hid_driver(dev->driver); 2575 struct hid_device *hdev = to_hid_device(dev); 2576 const struct hid_device_id *id; 2577 int ret = 0; 2578 2579 if (down_interruptible(&hdev->driver_input_lock)) { 2580 ret = -EINTR; 2581 goto end; 2582 } 2583 hdev->io_started = false; 2584 2585 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); 2586 2587 if (!hdev->driver) { 2588 id = hid_match_device(hdev, hdrv); 2589 if (id == NULL) { 2590 ret = -ENODEV; 2591 goto unlock; 2592 } 2593 2594 if (hdrv->match) { 2595 if (!hdrv->match(hdev, hid_ignore_special_drivers)) { 2596 ret = -ENODEV; 2597 goto unlock; 2598 } 2599 } else { 2600 /* 2601 * hid-generic implements .match(), so if 2602 * hid_ignore_special_drivers is set, we can safely 2603 * return. 
2604 */
2605 if (hid_ignore_special_drivers) {
2606 ret = -ENODEV;
2607 goto unlock;
2608 }
2609 }
2610
2611 /* reset the quirks that have been previously set */
2612 hdev->quirks = hid_lookup_quirk(hdev);
2613 hdev->driver = hdrv;
2614 if (hdrv->probe) {
2615 ret = hdrv->probe(hdev, id);
2616 } else { /* default probe */
2617 ret = hid_open_report(hdev);
2618 if (!ret)
2619 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2620 }
2621 if (ret) {
2622 hid_close_report(hdev);
2623 hdev->driver = NULL;
2624 }
2625 }
2626 unlock:
2627 if (!hdev->io_started)
2628 up(&hdev->driver_input_lock);
2629 end:
2630 return ret;
2631 }
2632
2633 static void hid_device_remove(struct device *dev)
2634 {
2635 struct hid_device *hdev = to_hid_device(dev);
2636 struct hid_driver *hdrv;
2637
2638 down(&hdev->driver_input_lock);
2639 hdev->io_started = false;
2640
2641 hdrv = hdev->driver;
2642 if (hdrv) {
2643 if (hdrv->remove)
2644 hdrv->remove(hdev);
2645 else /* default remove */
2646 hid_hw_stop(hdev);
2647 hid_close_report(hdev);
2648 hdev->driver = NULL;
2649 }
2650
2651 if (!hdev->io_started)
2652 up(&hdev->driver_input_lock);
2653 }
2654
2655 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2656 char *buf)
2657 {
2658 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2659
2660 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2661 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2662 }
2663 static DEVICE_ATTR_RO(modalias);
2664
2665 static struct attribute *hid_dev_attrs[] = {
2666 &dev_attr_modalias.attr,
2667 NULL,
2668 };
2669 static struct bin_attribute *hid_dev_bin_attrs[] = {
2670 &dev_bin_attr_report_desc,
2671 NULL
2672 };
2673 static const struct attribute_group hid_dev_group = {
2674 .attrs = hid_dev_attrs,
2675 .bin_attrs = hid_dev_bin_attrs,
2676 };
2677 __ATTRIBUTE_GROUPS(hid_dev);
2678
2679 static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
2680 {
2681 struct hid_device *hdev = to_hid_device(dev);
2682
2683 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2684 hdev->bus, hdev->vendor, hdev->product))
2685 return -ENOMEM;
2686
2687 if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2688 return -ENOMEM;
2689
2690 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2691 return -ENOMEM;
2692
2693 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2694 return -ENOMEM;
2695
2696 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2697 hdev->bus, hdev->group, hdev->vendor, hdev->product))
2698 return -ENOMEM;
2699
2700 return 0;
2701 }
2702
2703 struct bus_type hid_bus_type = {
2704 .name = "hid",
2705 .dev_groups = hid_dev_groups,
2706 .drv_groups = hid_drv_groups,
2707 .match = hid_bus_match,
2708 .probe = hid_device_probe,
2709 .remove = hid_device_remove,
2710 .uevent = hid_uevent,
2711 };
2712 EXPORT_SYMBOL(hid_bus_type);
2713
2714 int hid_add_device(struct hid_device *hdev)
2715 {
2716 static atomic_t id = ATOMIC_INIT(0);
2717 int ret;
2718
2719 if (WARN_ON(hdev->status & HID_STAT_ADDED))
2720 return -EBUSY;
2721
2722 hdev->quirks = hid_lookup_quirk(hdev);
2723
2724 /* we need to kill them here, otherwise they will stay allocated,
2725 * waiting for a matching driver */
2726 if (hid_ignore(hdev))
2727 return -ENODEV;
2728
2729 /*
2730 * Check for the mandatory transport channel.
2731 */
2732 if (!hdev->ll_driver->raw_request) {
2733 hid_err(hdev, "transport driver missing .raw_request()\n");
2734 return -EINVAL;
2735 }
2736
2737 /*
2738 * Read the device report descriptor once and use it as a template
2739 * for the driver-specific modifications.
2740 */
2741 ret = hdev->ll_driver->parse(hdev);
2742 if (ret)
2743 return ret;
2744 if (!hdev->dev_rdesc)
2745 return -ENODEV;
2746
2747 /*
2748 * Scan generic devices for group information
2749 */
2750 if (hid_ignore_special_drivers) {
2751 hdev->group = HID_GROUP_GENERIC;
2752 } else if (!hdev->group &&
2753 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2754 ret = hid_scan_report(hdev);
2755 if (ret)
2756 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2757 }
2758
2759 hdev->id = atomic_inc_return(&id);
2760
2761 /* XXX hack, any other cleaner solution after the driver core
2762 * is converted to allow more than 20 bytes as the device name? */
2763 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2764 hdev->vendor, hdev->product, hdev->id);
2765
2766 hid_debug_register(hdev, dev_name(&hdev->dev));
2767 ret = device_add(&hdev->dev);
2768 if (!ret)
2769 hdev->status |= HID_STAT_ADDED;
2770 else
2771 hid_debug_unregister(hdev);
2772
2773 return ret;
2774 }
2775 EXPORT_SYMBOL_GPL(hid_add_device);
2776
2777 /**
2778 * hid_allocate_device - allocate new hid device descriptor
2779 *
2780 * Allocate and initialize a hid device, so that hid_destroy_device might be
2781 * used to free it.
2782 *
2783 * A new hid_device pointer is returned on success, otherwise an
2784 * ERR_PTR-encoded error value.
2785 */
2786 struct hid_device *hid_allocate_device(void)
2787 {
2788 struct hid_device *hdev;
2789 int ret = -ENOMEM;
2790
2791 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2792 if (hdev == NULL)
2793 return ERR_PTR(ret);
2794
2795 device_initialize(&hdev->dev);
2796 hdev->dev.release = hid_device_release;
2797 hdev->dev.bus = &hid_bus_type;
2798 device_enable_async_suspend(&hdev->dev);
2799
2800 hid_close_report(hdev);
2801
2802 init_waitqueue_head(&hdev->debug_wait);
2803 INIT_LIST_HEAD(&hdev->debug_list);
2804 spin_lock_init(&hdev->debug_list_lock);
2805 sema_init(&hdev->driver_input_lock, 1);
2806 mutex_init(&hdev->ll_open_lock);
2807
2808 hid_bpf_device_init(hdev);
2809
2810 return hdev;
2811 }
2812 EXPORT_SYMBOL_GPL(hid_allocate_device);
2813
2814 static void hid_remove_device(struct hid_device *hdev)
2815 {
2816 if (hdev->status & HID_STAT_ADDED) {
2817 device_del(&hdev->dev);
2818 hid_debug_unregister(hdev);
2819 hdev->status &= ~HID_STAT_ADDED;
2820 }
2821 kfree(hdev->dev_rdesc);
2822 hdev->dev_rdesc = NULL;
2823 hdev->dev_rsize = 0;
2824 }
2825
2826 /**
2827 * hid_destroy_device - free previously allocated device
2828 *
2829 * @hdev: hid device
2830 *
2831 * If you allocate a hid_device through hid_allocate_device, you must only
2832 * ever free it with this function.
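 *
 * For illustration only (not a verbatim copy of any in-tree transport
 * driver, and example_ll_driver is a hypothetical name), the usual
 * lifecycle on the transport side looks roughly like:
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	hdev->ll_driver = &example_ll_driver;
 *	// fill in bus, vendor, product, name, phys, ...
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);
 *
 * with hid_destroy_device() also being the teardown call in the transport
 * driver's remove path once the device goes away.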
2833 */ 2834 void hid_destroy_device(struct hid_device *hdev) 2835 { 2836 hid_bpf_destroy_device(hdev); 2837 hid_remove_device(hdev); 2838 put_device(&hdev->dev); 2839 } 2840 EXPORT_SYMBOL_GPL(hid_destroy_device); 2841 2842 2843 static int __hid_bus_reprobe_drivers(struct device *dev, void *data) 2844 { 2845 struct hid_driver *hdrv = data; 2846 struct hid_device *hdev = to_hid_device(dev); 2847 2848 if (hdev->driver == hdrv && 2849 !hdrv->match(hdev, hid_ignore_special_drivers) && 2850 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) 2851 return device_reprobe(dev); 2852 2853 return 0; 2854 } 2855 2856 static int __hid_bus_driver_added(struct device_driver *drv, void *data) 2857 { 2858 struct hid_driver *hdrv = to_hid_driver(drv); 2859 2860 if (hdrv->match) { 2861 bus_for_each_dev(&hid_bus_type, NULL, hdrv, 2862 __hid_bus_reprobe_drivers); 2863 } 2864 2865 return 0; 2866 } 2867 2868 static int __bus_removed_driver(struct device_driver *drv, void *data) 2869 { 2870 return bus_rescan_devices(&hid_bus_type); 2871 } 2872 2873 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner, 2874 const char *mod_name) 2875 { 2876 int ret; 2877 2878 hdrv->driver.name = hdrv->name; 2879 hdrv->driver.bus = &hid_bus_type; 2880 hdrv->driver.owner = owner; 2881 hdrv->driver.mod_name = mod_name; 2882 2883 INIT_LIST_HEAD(&hdrv->dyn_list); 2884 spin_lock_init(&hdrv->dyn_lock); 2885 2886 ret = driver_register(&hdrv->driver); 2887 2888 if (ret == 0) 2889 bus_for_each_drv(&hid_bus_type, NULL, NULL, 2890 __hid_bus_driver_added); 2891 2892 return ret; 2893 } 2894 EXPORT_SYMBOL_GPL(__hid_register_driver); 2895 2896 void hid_unregister_driver(struct hid_driver *hdrv) 2897 { 2898 driver_unregister(&hdrv->driver); 2899 hid_free_dynids(hdrv); 2900 2901 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver); 2902 } 2903 EXPORT_SYMBOL_GPL(hid_unregister_driver); 2904 2905 int hid_check_keys_pressed(struct hid_device *hid) 2906 { 2907 struct hid_input *hidinput; 2908 int i; 2909 2910 if (!(hid->claimed & HID_CLAIMED_INPUT)) 2911 return 0; 2912 2913 list_for_each_entry(hidinput, &hid->inputs, list) { 2914 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) 2915 if (hidinput->input->key[i]) 2916 return 1; 2917 } 2918 2919 return 0; 2920 } 2921 EXPORT_SYMBOL_GPL(hid_check_keys_pressed); 2922 2923 #ifdef CONFIG_HID_BPF 2924 static struct hid_bpf_ops hid_ops = { 2925 .hid_get_report = hid_get_report, 2926 .hid_hw_raw_request = hid_hw_raw_request, 2927 .owner = THIS_MODULE, 2928 .bus_type = &hid_bus_type, 2929 }; 2930 #endif 2931 2932 static int __init hid_init(void) 2933 { 2934 int ret; 2935 2936 ret = bus_register(&hid_bus_type); 2937 if (ret) { 2938 pr_err("can't register hid bus\n"); 2939 goto err; 2940 } 2941 2942 #ifdef CONFIG_HID_BPF 2943 hid_bpf_ops = &hid_ops; 2944 #endif 2945 2946 ret = hidraw_init(); 2947 if (ret) 2948 goto err_bus; 2949 2950 hid_debug_init(); 2951 2952 return 0; 2953 err_bus: 2954 bus_unregister(&hid_bus_type); 2955 err: 2956 return ret; 2957 } 2958 2959 static void __exit hid_exit(void) 2960 { 2961 #ifdef CONFIG_HID_BPF 2962 hid_bpf_ops = NULL; 2963 #endif 2964 hid_debug_exit(); 2965 hidraw_exit(); 2966 bus_unregister(&hid_bus_type); 2967 hid_quirks_exit(HID_BUS_ANY); 2968 } 2969 2970 module_init(hid_init); 2971 module_exit(hid_exit); 2972 2973 MODULE_AUTHOR("Andreas Gal"); 2974 MODULE_AUTHOR("Vojtech Pavlik"); 2975 MODULE_AUTHOR("Jiri Kosina"); 2976 MODULE_LICENSE("GPL"); 2977
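
/*
 * For reference, a minimal HID device driver built on top of this core would
 * look roughly like the sketch below. This is illustrative only: the vendor
 * and product IDs and every example_* identifier are placeholders, not
 * symbols defined anywhere in the tree.
 *
 *	static const struct hid_device_id example_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, example_devices);
 *
 *	static int example_probe(struct hid_device *hdev,
 *				 const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 *
 *	static struct hid_driver example_driver = {
 *		.name = "example-hid",
 *		.id_table = example_devices,
 *		.probe = example_probe,
 *	};
 *	module_hid_driver(example_driver);
 */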