1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * HID support for Linux 4 * 5 * Copyright (c) 1999 Andreas Gal 6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 8 * Copyright (c) 2006-2012 Jiri Kosina 9 */ 10 11 /* 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/kernel.h> 20 #include <linux/list.h> 21 #include <linux/mm.h> 22 #include <linux/spinlock.h> 23 #include <asm/unaligned.h> 24 #include <asm/byteorder.h> 25 #include <linux/input.h> 26 #include <linux/wait.h> 27 #include <linux/vmalloc.h> 28 #include <linux/sched.h> 29 #include <linux/semaphore.h> 30 31 #include <linux/hid.h> 32 #include <linux/hiddev.h> 33 #include <linux/hid-debug.h> 34 #include <linux/hidraw.h> 35 36 #include "hid-ids.h" 37 38 /* 39 * Version Information 40 */ 41 42 #define DRIVER_DESC "HID core driver" 43 44 int hid_debug = 0; 45 module_param_named(debug, hid_debug, int, 0600); 46 MODULE_PARM_DESC(debug, "toggle HID debugging messages"); 47 EXPORT_SYMBOL_GPL(hid_debug); 48 49 static int hid_ignore_special_drivers = 0; 50 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600); 51 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver"); 52 53 /* 54 * Register a new report for a device. 55 */ 56 57 struct hid_report *hid_register_report(struct hid_device *device, 58 enum hid_report_type type, unsigned int id, 59 unsigned int application) 60 { 61 struct hid_report_enum *report_enum = device->report_enum + type; 62 struct hid_report *report; 63 64 if (id >= HID_MAX_IDS) 65 return NULL; 66 if (report_enum->report_id_hash[id]) 67 return report_enum->report_id_hash[id]; 68 69 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL); 70 if (!report) 71 return NULL; 72 73 if (id != 0) 74 report_enum->numbered = 1; 75 76 report->id = id; 77 report->type = type; 78 report->size = 0; 79 report->device = device; 80 report->application = application; 81 report_enum->report_id_hash[id] = report; 82 83 list_add_tail(&report->list, &report_enum->report_list); 84 INIT_LIST_HEAD(&report->field_entry_list); 85 86 return report; 87 } 88 EXPORT_SYMBOL_GPL(hid_register_report); 89 90 /* 91 * Register a new field for this report. 92 */ 93 94 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) 95 { 96 struct hid_field *field; 97 98 if (report->maxfield == HID_MAX_FIELDS) { 99 hid_err(report->device, "too many fields in report\n"); 100 return NULL; 101 } 102 103 field = kzalloc((sizeof(struct hid_field) + 104 usages * sizeof(struct hid_usage) + 105 3 * usages * sizeof(unsigned int)), GFP_KERNEL); 106 if (!field) 107 return NULL; 108 109 field->index = report->maxfield++; 110 report->field[field->index] = field; 111 field->usage = (struct hid_usage *)(field + 1); 112 field->value = (s32 *)(field->usage + usages); 113 field->new_value = (s32 *)(field->value + usages); 114 field->usages_priorities = (s32 *)(field->new_value + usages); 115 field->report = report; 116 117 return field; 118 } 119 120 /* 121 * Open a collection. The type/usage is pushed on the stack. 
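 *
 * For example, a descriptor that opens Collection (Application) and, inside
 * it, Collection (Physical) produces two hid_collection entries: the
 * application collection at level 0 with parent_idx == -1, and the nested
 * physical collection at level 1 whose parent_idx points back at the
 * application entry, so later code can walk the hierarchy upwards.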
122 */ 123 124 static int open_collection(struct hid_parser *parser, unsigned type) 125 { 126 struct hid_collection *collection; 127 unsigned usage; 128 int collection_index; 129 130 usage = parser->local.usage[0]; 131 132 if (parser->collection_stack_ptr == parser->collection_stack_size) { 133 unsigned int *collection_stack; 134 unsigned int new_size = parser->collection_stack_size + 135 HID_COLLECTION_STACK_SIZE; 136 137 collection_stack = krealloc(parser->collection_stack, 138 new_size * sizeof(unsigned int), 139 GFP_KERNEL); 140 if (!collection_stack) 141 return -ENOMEM; 142 143 parser->collection_stack = collection_stack; 144 parser->collection_stack_size = new_size; 145 } 146 147 if (parser->device->maxcollection == parser->device->collection_size) { 148 collection = kmalloc( 149 array3_size(sizeof(struct hid_collection), 150 parser->device->collection_size, 151 2), 152 GFP_KERNEL); 153 if (collection == NULL) { 154 hid_err(parser->device, "failed to reallocate collection array\n"); 155 return -ENOMEM; 156 } 157 memcpy(collection, parser->device->collection, 158 sizeof(struct hid_collection) * 159 parser->device->collection_size); 160 memset(collection + parser->device->collection_size, 0, 161 sizeof(struct hid_collection) * 162 parser->device->collection_size); 163 kfree(parser->device->collection); 164 parser->device->collection = collection; 165 parser->device->collection_size *= 2; 166 } 167 168 parser->collection_stack[parser->collection_stack_ptr++] = 169 parser->device->maxcollection; 170 171 collection_index = parser->device->maxcollection++; 172 collection = parser->device->collection + collection_index; 173 collection->type = type; 174 collection->usage = usage; 175 collection->level = parser->collection_stack_ptr - 1; 176 collection->parent_idx = (collection->level == 0) ? -1 : 177 parser->collection_stack[collection->level - 1]; 178 179 if (type == HID_COLLECTION_APPLICATION) 180 parser->device->maxapplication++; 181 182 return 0; 183 } 184 185 /* 186 * Close a collection. 187 */ 188 189 static int close_collection(struct hid_parser *parser) 190 { 191 if (!parser->collection_stack_ptr) { 192 hid_err(parser->device, "collection stack underflow\n"); 193 return -EINVAL; 194 } 195 parser->collection_stack_ptr--; 196 return 0; 197 } 198 199 /* 200 * Climb up the stack, search for the specified collection type 201 * and return the usage. 202 */ 203 204 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) 205 { 206 struct hid_collection *collection = parser->device->collection; 207 int n; 208 209 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) { 210 unsigned index = parser->collection_stack[n]; 211 if (collection[index].type == type) 212 return collection[index].usage; 213 } 214 return 0; /* we know nothing about this usage type */ 215 } 216 217 /* 218 * Concatenate usage which defines 16 bits or less with the 219 * currently defined usage page to form a 32 bit usage 220 */ 221 222 static void complete_usage(struct hid_parser *parser, unsigned int index) 223 { 224 parser->local.usage[index] &= 0xFFFF; 225 parser->local.usage[index] |= 226 (parser->global.usage_page & 0xFFFF) << 16; 227 } 228 229 /* 230 * Add a usage to the temporary parser table. 
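 *
 * Usage items of two bytes or less carry only a usage id; complete_usage()
 * above merges them with the current Usage Page. For instance, Usage Page
 * (Generic Desktop, 0x01) followed by Usage (X, 0x30) is stored as the
 * 32-bit usage 0x00010030 (HID_GD_X), while a four-byte "extended" usage
 * already contains its page and is kept as-is.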
231 */ 232 233 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) 234 { 235 if (parser->local.usage_index >= HID_MAX_USAGES) { 236 hid_err(parser->device, "usage index exceeded\n"); 237 return -1; 238 } 239 parser->local.usage[parser->local.usage_index] = usage; 240 241 /* 242 * If Usage item only includes usage id, concatenate it with 243 * currently defined usage page 244 */ 245 if (size <= 2) 246 complete_usage(parser, parser->local.usage_index); 247 248 parser->local.usage_size[parser->local.usage_index] = size; 249 parser->local.collection_index[parser->local.usage_index] = 250 parser->collection_stack_ptr ? 251 parser->collection_stack[parser->collection_stack_ptr - 1] : 0; 252 parser->local.usage_index++; 253 return 0; 254 } 255 256 /* 257 * Register a new field for this report. 258 */ 259 260 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags) 261 { 262 struct hid_report *report; 263 struct hid_field *field; 264 unsigned int usages; 265 unsigned int offset; 266 unsigned int i; 267 unsigned int application; 268 269 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 270 271 report = hid_register_report(parser->device, report_type, 272 parser->global.report_id, application); 273 if (!report) { 274 hid_err(parser->device, "hid_register_report failed\n"); 275 return -1; 276 } 277 278 /* Handle both signed and unsigned cases properly */ 279 if ((parser->global.logical_minimum < 0 && 280 parser->global.logical_maximum < 281 parser->global.logical_minimum) || 282 (parser->global.logical_minimum >= 0 && 283 (__u32)parser->global.logical_maximum < 284 (__u32)parser->global.logical_minimum)) { 285 dbg_hid("logical range invalid 0x%x 0x%x\n", 286 parser->global.logical_minimum, 287 parser->global.logical_maximum); 288 return -1; 289 } 290 291 offset = report->size; 292 report->size += parser->global.report_size * parser->global.report_count; 293 294 /* Total size check: Allow for possible report index byte */ 295 if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { 296 hid_err(parser->device, "report is too long\n"); 297 return -1; 298 } 299 300 if (!parser->local.usage_index) /* Ignore padding fields */ 301 return 0; 302 303 usages = max_t(unsigned, parser->local.usage_index, 304 parser->global.report_count); 305 306 field = hid_register_field(report, usages); 307 if (!field) 308 return 0; 309 310 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL); 311 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL); 312 field->application = application; 313 314 for (i = 0; i < usages; i++) { 315 unsigned j = i; 316 /* Duplicate the last usage we parsed if we have excess values */ 317 if (i >= parser->local.usage_index) 318 j = parser->local.usage_index - 1; 319 field->usage[i].hid = parser->local.usage[j]; 320 field->usage[i].collection_index = 321 parser->local.collection_index[j]; 322 field->usage[i].usage_index = i; 323 field->usage[i].resolution_multiplier = 1; 324 } 325 326 field->maxusage = usages; 327 field->flags = flags; 328 field->report_offset = offset; 329 field->report_type = report_type; 330 field->report_size = parser->global.report_size; 331 field->report_count = parser->global.report_count; 332 field->logical_minimum = parser->global.logical_minimum; 333 field->logical_maximum = parser->global.logical_maximum; 334 field->physical_minimum = parser->global.physical_minimum; 335 field->physical_maximum = parser->global.physical_maximum; 336 field->unit_exponent = 
parser->global.unit_exponent; 337 field->unit = parser->global.unit; 338 339 return 0; 340 } 341 342 /* 343 * Read data value from item. 344 */ 345 346 static u32 item_udata(struct hid_item *item) 347 { 348 switch (item->size) { 349 case 1: return item->data.u8; 350 case 2: return item->data.u16; 351 case 4: return item->data.u32; 352 } 353 return 0; 354 } 355 356 static s32 item_sdata(struct hid_item *item) 357 { 358 switch (item->size) { 359 case 1: return item->data.s8; 360 case 2: return item->data.s16; 361 case 4: return item->data.s32; 362 } 363 return 0; 364 } 365 366 /* 367 * Process a global item. 368 */ 369 370 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 371 { 372 __s32 raw_value; 373 switch (item->tag) { 374 case HID_GLOBAL_ITEM_TAG_PUSH: 375 376 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { 377 hid_err(parser->device, "global environment stack overflow\n"); 378 return -1; 379 } 380 381 memcpy(parser->global_stack + parser->global_stack_ptr++, 382 &parser->global, sizeof(struct hid_global)); 383 return 0; 384 385 case HID_GLOBAL_ITEM_TAG_POP: 386 387 if (!parser->global_stack_ptr) { 388 hid_err(parser->device, "global environment stack underflow\n"); 389 return -1; 390 } 391 392 memcpy(&parser->global, parser->global_stack + 393 --parser->global_stack_ptr, sizeof(struct hid_global)); 394 return 0; 395 396 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE: 397 parser->global.usage_page = item_udata(item); 398 return 0; 399 400 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM: 401 parser->global.logical_minimum = item_sdata(item); 402 return 0; 403 404 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM: 405 if (parser->global.logical_minimum < 0) 406 parser->global.logical_maximum = item_sdata(item); 407 else 408 parser->global.logical_maximum = item_udata(item); 409 return 0; 410 411 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM: 412 parser->global.physical_minimum = item_sdata(item); 413 return 0; 414 415 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM: 416 if (parser->global.physical_minimum < 0) 417 parser->global.physical_maximum = item_sdata(item); 418 else 419 parser->global.physical_maximum = item_udata(item); 420 return 0; 421 422 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 423 /* Many devices provide unit exponent as a two's complement 424 * nibble due to the common misunderstanding of HID 425 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle 426 * both this and the standard encoding. 
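 *
 * For example, a Unit Exponent item of 0x0E has no bits set outside the
 * low nibble, so it is treated as a 4-bit two's complement value and
 * decoded to -2 via hid_snto32(); a device using the standard encoding
 * would report -2 as 0xFE (item_sdata() already yields -2), which has
 * upper bits set and is therefore taken as-is.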
*/ 427 raw_value = item_sdata(item); 428 if (!(raw_value & 0xfffffff0)) 429 parser->global.unit_exponent = hid_snto32(raw_value, 4); 430 else 431 parser->global.unit_exponent = raw_value; 432 return 0; 433 434 case HID_GLOBAL_ITEM_TAG_UNIT: 435 parser->global.unit = item_udata(item); 436 return 0; 437 438 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: 439 parser->global.report_size = item_udata(item); 440 if (parser->global.report_size > 256) { 441 hid_err(parser->device, "invalid report_size %d\n", 442 parser->global.report_size); 443 return -1; 444 } 445 return 0; 446 447 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT: 448 parser->global.report_count = item_udata(item); 449 if (parser->global.report_count > HID_MAX_USAGES) { 450 hid_err(parser->device, "invalid report_count %d\n", 451 parser->global.report_count); 452 return -1; 453 } 454 return 0; 455 456 case HID_GLOBAL_ITEM_TAG_REPORT_ID: 457 parser->global.report_id = item_udata(item); 458 if (parser->global.report_id == 0 || 459 parser->global.report_id >= HID_MAX_IDS) { 460 hid_err(parser->device, "report_id %u is invalid\n", 461 parser->global.report_id); 462 return -1; 463 } 464 return 0; 465 466 default: 467 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag); 468 return -1; 469 } 470 } 471 472 /* 473 * Process a local item. 474 */ 475 476 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) 477 { 478 __u32 data; 479 unsigned n; 480 __u32 count; 481 482 data = item_udata(item); 483 484 switch (item->tag) { 485 case HID_LOCAL_ITEM_TAG_DELIMITER: 486 487 if (data) { 488 /* 489 * We treat items before the first delimiter 490 * as global to all usage sets (branch 0). 491 * In the moment we process only these global 492 * items and the first delimiter set. 493 */ 494 if (parser->local.delimiter_depth != 0) { 495 hid_err(parser->device, "nested delimiters\n"); 496 return -1; 497 } 498 parser->local.delimiter_depth++; 499 parser->local.delimiter_branch++; 500 } else { 501 if (parser->local.delimiter_depth < 1) { 502 hid_err(parser->device, "bogus close delimiter\n"); 503 return -1; 504 } 505 parser->local.delimiter_depth--; 506 } 507 return 0; 508 509 case HID_LOCAL_ITEM_TAG_USAGE: 510 511 if (parser->local.delimiter_branch > 1) { 512 dbg_hid("alternative usage ignored\n"); 513 return 0; 514 } 515 516 return hid_add_usage(parser, data, item->size); 517 518 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: 519 520 if (parser->local.delimiter_branch > 1) { 521 dbg_hid("alternative usage ignored\n"); 522 return 0; 523 } 524 525 parser->local.usage_minimum = data; 526 return 0; 527 528 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM: 529 530 if (parser->local.delimiter_branch > 1) { 531 dbg_hid("alternative usage ignored\n"); 532 return 0; 533 } 534 535 count = data - parser->local.usage_minimum; 536 if (count + parser->local.usage_index >= HID_MAX_USAGES) { 537 /* 538 * We do not warn if the name is not set, we are 539 * actually pre-scanning the device. 
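 * (During hid_scan_report(), which runs before the device has been named
 * and added to the bus, dev_name() below is not yet set, so this clamping
 * stays silent on the pre-scan pass and only warns when the descriptor is
 * parsed again by hid_open_report().)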
540 */ 541 if (dev_name(&parser->device->dev)) 542 hid_warn(parser->device, 543 "ignoring exceeding usage max\n"); 544 data = HID_MAX_USAGES - parser->local.usage_index + 545 parser->local.usage_minimum - 1; 546 if (data <= 0) { 547 hid_err(parser->device, 548 "no more usage index available\n"); 549 return -1; 550 } 551 } 552 553 for (n = parser->local.usage_minimum; n <= data; n++) 554 if (hid_add_usage(parser, n, item->size)) { 555 dbg_hid("hid_add_usage failed\n"); 556 return -1; 557 } 558 return 0; 559 560 default: 561 562 dbg_hid("unknown local item tag 0x%x\n", item->tag); 563 return 0; 564 } 565 return 0; 566 } 567 568 /* 569 * Concatenate Usage Pages into Usages where relevant: 570 * As per specification, 6.2.2.8: "When the parser encounters a main item it 571 * concatenates the last declared Usage Page with a Usage to form a complete 572 * usage value." 573 */ 574 575 static void hid_concatenate_last_usage_page(struct hid_parser *parser) 576 { 577 int i; 578 unsigned int usage_page; 579 unsigned int current_page; 580 581 if (!parser->local.usage_index) 582 return; 583 584 usage_page = parser->global.usage_page; 585 586 /* 587 * Concatenate usage page again only if last declared Usage Page 588 * has not been already used in previous usages concatenation 589 */ 590 for (i = parser->local.usage_index - 1; i >= 0; i--) { 591 if (parser->local.usage_size[i] > 2) 592 /* Ignore extended usages */ 593 continue; 594 595 current_page = parser->local.usage[i] >> 16; 596 if (current_page == usage_page) 597 break; 598 599 complete_usage(parser, i); 600 } 601 } 602 603 /* 604 * Process a main item. 605 */ 606 607 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) 608 { 609 __u32 data; 610 int ret; 611 612 hid_concatenate_last_usage_page(parser); 613 614 data = item_udata(item); 615 616 switch (item->tag) { 617 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 618 ret = open_collection(parser, data & 0xff); 619 break; 620 case HID_MAIN_ITEM_TAG_END_COLLECTION: 621 ret = close_collection(parser); 622 break; 623 case HID_MAIN_ITEM_TAG_INPUT: 624 ret = hid_add_field(parser, HID_INPUT_REPORT, data); 625 break; 626 case HID_MAIN_ITEM_TAG_OUTPUT: 627 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data); 628 break; 629 case HID_MAIN_ITEM_TAG_FEATURE: 630 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 631 break; 632 default: 633 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag); 634 ret = 0; 635 } 636 637 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */ 638 639 return ret; 640 } 641 642 /* 643 * Process a reserved item. 644 */ 645 646 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item) 647 { 648 dbg_hid("reserved item type, tag 0x%x\n", item->tag); 649 return 0; 650 } 651 652 /* 653 * Free a report and all registered fields. The field->usage and 654 * field->value table's are allocated behind the field, so we need 655 * only to free(field) itself. 656 */ 657 658 static void hid_free_report(struct hid_report *report) 659 { 660 unsigned n; 661 662 kfree(report->field_entries); 663 664 for (n = 0; n < report->maxfield; n++) 665 kfree(report->field[n]); 666 kfree(report); 667 } 668 669 /* 670 * Close report. This function returns the device 671 * state to the point prior to hid_open_report(). 
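 * It is safe to call this more than once: freed pointers are reset to
 * NULL and the sizes and counters to 0, so a later hid_open_report()
 * starts from a clean slate and the error paths below can use it
 * unconditionally.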
672 */ 673 static void hid_close_report(struct hid_device *device) 674 { 675 unsigned i, j; 676 677 for (i = 0; i < HID_REPORT_TYPES; i++) { 678 struct hid_report_enum *report_enum = device->report_enum + i; 679 680 for (j = 0; j < HID_MAX_IDS; j++) { 681 struct hid_report *report = report_enum->report_id_hash[j]; 682 if (report) 683 hid_free_report(report); 684 } 685 memset(report_enum, 0, sizeof(*report_enum)); 686 INIT_LIST_HEAD(&report_enum->report_list); 687 } 688 689 kfree(device->rdesc); 690 device->rdesc = NULL; 691 device->rsize = 0; 692 693 kfree(device->collection); 694 device->collection = NULL; 695 device->collection_size = 0; 696 device->maxcollection = 0; 697 device->maxapplication = 0; 698 699 device->status &= ~HID_STAT_PARSED; 700 } 701 702 /* 703 * Free a device structure, all reports, and all fields. 704 */ 705 706 static void hid_device_release(struct device *dev) 707 { 708 struct hid_device *hid = to_hid_device(dev); 709 710 hid_close_report(hid); 711 kfree(hid->dev_rdesc); 712 kfree(hid); 713 } 714 715 /* 716 * Fetch a report description item from the data stream. We support long 717 * items, though they are not used yet. 718 */ 719 720 static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) 721 { 722 u8 b; 723 724 if ((end - start) <= 0) 725 return NULL; 726 727 b = *start++; 728 729 item->type = (b >> 2) & 3; 730 item->tag = (b >> 4) & 15; 731 732 if (item->tag == HID_ITEM_TAG_LONG) { 733 734 item->format = HID_ITEM_FORMAT_LONG; 735 736 if ((end - start) < 2) 737 return NULL; 738 739 item->size = *start++; 740 item->tag = *start++; 741 742 if ((end - start) < item->size) 743 return NULL; 744 745 item->data.longdata = start; 746 start += item->size; 747 return start; 748 } 749 750 item->format = HID_ITEM_FORMAT_SHORT; 751 item->size = b & 3; 752 753 switch (item->size) { 754 case 0: 755 return start; 756 757 case 1: 758 if ((end - start) < 1) 759 return NULL; 760 item->data.u8 = *start++; 761 return start; 762 763 case 2: 764 if ((end - start) < 2) 765 return NULL; 766 item->data.u16 = get_unaligned_le16(start); 767 start = (__u8 *)((__le16 *)start + 1); 768 return start; 769 770 case 3: 771 item->size++; 772 if ((end - start) < 4) 773 return NULL; 774 item->data.u32 = get_unaligned_le32(start); 775 start = (__u8 *)((__le32 *)start + 1); 776 return start; 777 } 778 779 return NULL; 780 } 781 782 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage) 783 { 784 struct hid_device *hid = parser->device; 785 786 if (usage == HID_DG_CONTACTID) 787 hid->group = HID_GROUP_MULTITOUCH; 788 } 789 790 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) 791 { 792 if (usage == 0xff0000c5 && parser->global.report_count == 256 && 793 parser->global.report_size == 8) 794 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 795 796 if (usage == 0xff0000c6 && parser->global.report_count == 1 && 797 parser->global.report_size == 8) 798 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 799 } 800 801 static void hid_scan_collection(struct hid_parser *parser, unsigned type) 802 { 803 struct hid_device *hid = parser->device; 804 int i; 805 806 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && 807 type == HID_COLLECTION_PHYSICAL) 808 hid->group = HID_GROUP_SENSOR_HUB; 809 810 if (hid->vendor == USB_VENDOR_ID_MICROSOFT && 811 hid->product == USB_DEVICE_ID_MS_POWER_COVER && 812 hid->group == HID_GROUP_MULTITOUCH) 813 hid->group = HID_GROUP_GENERIC; 814 815 if ((parser->global.usage_page << 16) == HID_UP_GENDESK) 816 for (i = 0; i < 
parser->local.usage_index; i++) 817 if (parser->local.usage[i] == HID_GD_POINTER) 818 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER; 819 820 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR) 821 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC; 822 823 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR) 824 for (i = 0; i < parser->local.usage_index; i++) 825 if (parser->local.usage[i] == 826 (HID_UP_GOOGLEVENDOR | 0x0001)) 827 parser->device->group = 828 HID_GROUP_VIVALDI; 829 } 830 831 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) 832 { 833 __u32 data; 834 int i; 835 836 hid_concatenate_last_usage_page(parser); 837 838 data = item_udata(item); 839 840 switch (item->tag) { 841 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 842 hid_scan_collection(parser, data & 0xff); 843 break; 844 case HID_MAIN_ITEM_TAG_END_COLLECTION: 845 break; 846 case HID_MAIN_ITEM_TAG_INPUT: 847 /* ignore constant inputs, they will be ignored by hid-input */ 848 if (data & HID_MAIN_ITEM_CONSTANT) 849 break; 850 for (i = 0; i < parser->local.usage_index; i++) 851 hid_scan_input_usage(parser, parser->local.usage[i]); 852 break; 853 case HID_MAIN_ITEM_TAG_OUTPUT: 854 break; 855 case HID_MAIN_ITEM_TAG_FEATURE: 856 for (i = 0; i < parser->local.usage_index; i++) 857 hid_scan_feature_usage(parser, parser->local.usage[i]); 858 break; 859 } 860 861 /* Reset the local parser environment */ 862 memset(&parser->local, 0, sizeof(parser->local)); 863 864 return 0; 865 } 866 867 /* 868 * Scan a report descriptor before the device is added to the bus. 869 * Sets device groups and other properties that determine what driver 870 * to load. 871 */ 872 static int hid_scan_report(struct hid_device *hid) 873 { 874 struct hid_parser *parser; 875 struct hid_item item; 876 __u8 *start = hid->dev_rdesc; 877 __u8 *end = start + hid->dev_rsize; 878 static int (*dispatch_type[])(struct hid_parser *parser, 879 struct hid_item *item) = { 880 hid_scan_main, 881 hid_parser_global, 882 hid_parser_local, 883 hid_parser_reserved 884 }; 885 886 parser = vzalloc(sizeof(struct hid_parser)); 887 if (!parser) 888 return -ENOMEM; 889 890 parser->device = hid; 891 hid->group = HID_GROUP_GENERIC; 892 893 /* 894 * The parsing is simpler than the one in hid_open_report() as we should 895 * be robust against hid errors. Those errors will be raised by 896 * hid_open_report() anyway. 897 */ 898 while ((start = fetch_item(start, end, &item)) != NULL) 899 dispatch_type[item.type](parser, &item); 900 901 /* 902 * Handle special flags set during scanning. 903 */ 904 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) && 905 (hid->group == HID_GROUP_MULTITOUCH)) 906 hid->group = HID_GROUP_MULTITOUCH_WIN_8; 907 908 /* 909 * Vendor specific handlings 910 */ 911 switch (hid->vendor) { 912 case USB_VENDOR_ID_WACOM: 913 hid->group = HID_GROUP_WACOM; 914 break; 915 case USB_VENDOR_ID_SYNAPTICS: 916 if (hid->group == HID_GROUP_GENERIC) 917 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 918 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 919 /* 920 * hid-rmi should take care of them, 921 * not hid-generic 922 */ 923 hid->group = HID_GROUP_RMI; 924 break; 925 } 926 927 kfree(parser->collection_stack); 928 vfree(parser); 929 return 0; 930 } 931 932 /** 933 * hid_parse_report - parse device report 934 * 935 * @hid: hid device 936 * @start: report start 937 * @size: report size 938 * 939 * Allocate the device report as read by the bus driver. This function should 940 * only be called from parse() in ll drivers. 
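 *
 * A transport ("ll") driver typically just forwards the descriptor it
 * fetched from the hardware, along the lines of (sketch, names made up):
 *
 *	static int my_ll_parse(struct hid_device *hid)
 *	{
 *		struct my_ll_device *dev = hid->driver_data;
 *
 *		return hid_parse_report(hid, dev->rdesc, dev->rdesc_size);
 *	}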
941 */ 942 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size) 943 { 944 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); 945 if (!hid->dev_rdesc) 946 return -ENOMEM; 947 hid->dev_rsize = size; 948 return 0; 949 } 950 EXPORT_SYMBOL_GPL(hid_parse_report); 951 952 static const char * const hid_report_names[] = { 953 "HID_INPUT_REPORT", 954 "HID_OUTPUT_REPORT", 955 "HID_FEATURE_REPORT", 956 }; 957 /** 958 * hid_validate_values - validate existing device report's value indexes 959 * 960 * @hid: hid device 961 * @type: which report type to examine 962 * @id: which report ID to examine (0 for first) 963 * @field_index: which report field to examine 964 * @report_counts: expected number of values 965 * 966 * Validate the number of values in a given field of a given report, after 967 * parsing. 968 */ 969 struct hid_report *hid_validate_values(struct hid_device *hid, 970 enum hid_report_type type, unsigned int id, 971 unsigned int field_index, 972 unsigned int report_counts) 973 { 974 struct hid_report *report; 975 976 if (type > HID_FEATURE_REPORT) { 977 hid_err(hid, "invalid HID report type %u\n", type); 978 return NULL; 979 } 980 981 if (id >= HID_MAX_IDS) { 982 hid_err(hid, "invalid HID report id %u\n", id); 983 return NULL; 984 } 985 986 /* 987 * Explicitly not using hid_get_report() here since it depends on 988 * ->numbered being checked, which may not always be the case when 989 * drivers go to access report values. 990 */ 991 if (id == 0) { 992 /* 993 * Validating on id 0 means we should examine the first 994 * report in the list. 995 */ 996 report = list_entry( 997 hid->report_enum[type].report_list.next, 998 struct hid_report, list); 999 } else { 1000 report = hid->report_enum[type].report_id_hash[id]; 1001 } 1002 if (!report) { 1003 hid_err(hid, "missing %s %u\n", hid_report_names[type], id); 1004 return NULL; 1005 } 1006 if (report->maxfield <= field_index) { 1007 hid_err(hid, "not enough fields in %s %u\n", 1008 hid_report_names[type], id); 1009 return NULL; 1010 } 1011 if (report->field[field_index]->report_count < report_counts) { 1012 hid_err(hid, "not enough values in %s %u field %u\n", 1013 hid_report_names[type], id, field_index); 1014 return NULL; 1015 } 1016 return report; 1017 } 1018 EXPORT_SYMBOL_GPL(hid_validate_values); 1019 1020 static int hid_calculate_multiplier(struct hid_device *hid, 1021 struct hid_field *multiplier) 1022 { 1023 int m; 1024 __s32 v = *multiplier->value; 1025 __s32 lmin = multiplier->logical_minimum; 1026 __s32 lmax = multiplier->logical_maximum; 1027 __s32 pmin = multiplier->physical_minimum; 1028 __s32 pmax = multiplier->physical_maximum; 1029 1030 /* 1031 * "Because OS implementations will generally divide the control's 1032 * reported count by the Effective Resolution Multiplier, designers 1033 * should take care not to establish a potential Effective 1034 * Resolution Multiplier of zero." 1035 * HID Usage Table, v1.12, Section 4.3.1, p31 1036 */ 1037 if (lmax - lmin == 0) 1038 return 1; 1039 /* 1040 * Handling the unit exponent is left as an exercise to whoever 1041 * finds a device where that exponent is not 0. 
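 *
 * As a worked example of the linear mapping below: a multiplier declared
 * with Logical Minimum/Maximum of 0/1 and Physical Minimum/Maximum of 1/8
 * that currently reports the value 1 yields
 *	((1 - 0) / (1 - 0)) * (8 - 1) + 1 = 8
 * i.e. an effective Resolution Multiplier of 8.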
1042 */ 1043 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin); 1044 if (unlikely(multiplier->unit_exponent != 0)) { 1045 hid_warn(hid, 1046 "unsupported Resolution Multiplier unit exponent %d\n", 1047 multiplier->unit_exponent); 1048 } 1049 1050 /* There are no devices with an effective multiplier > 255 */ 1051 if (unlikely(m == 0 || m > 255 || m < -255)) { 1052 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m); 1053 m = 1; 1054 } 1055 1056 return m; 1057 } 1058 1059 static void hid_apply_multiplier_to_field(struct hid_device *hid, 1060 struct hid_field *field, 1061 struct hid_collection *multiplier_collection, 1062 int effective_multiplier) 1063 { 1064 struct hid_collection *collection; 1065 struct hid_usage *usage; 1066 int i; 1067 1068 /* 1069 * If multiplier_collection is NULL, the multiplier applies 1070 * to all fields in the report. 1071 * Otherwise, it is the Logical Collection the multiplier applies to 1072 * but our field may be in a subcollection of that collection. 1073 */ 1074 for (i = 0; i < field->maxusage; i++) { 1075 usage = &field->usage[i]; 1076 1077 collection = &hid->collection[usage->collection_index]; 1078 while (collection->parent_idx != -1 && 1079 collection != multiplier_collection) 1080 collection = &hid->collection[collection->parent_idx]; 1081 1082 if (collection->parent_idx != -1 || 1083 multiplier_collection == NULL) 1084 usage->resolution_multiplier = effective_multiplier; 1085 1086 } 1087 } 1088 1089 static void hid_apply_multiplier(struct hid_device *hid, 1090 struct hid_field *multiplier) 1091 { 1092 struct hid_report_enum *rep_enum; 1093 struct hid_report *rep; 1094 struct hid_field *field; 1095 struct hid_collection *multiplier_collection; 1096 int effective_multiplier; 1097 int i; 1098 1099 /* 1100 * "The Resolution Multiplier control must be contained in the same 1101 * Logical Collection as the control(s) to which it is to be applied. 1102 * If no Resolution Multiplier is defined, then the Resolution 1103 * Multiplier defaults to 1. If more than one control exists in a 1104 * Logical Collection, the Resolution Multiplier is associated with 1105 * all controls in the collection. If no Logical Collection is 1106 * defined, the Resolution Multiplier is associated with all 1107 * controls in the report." 1108 * HID Usage Table, v1.12, Section 4.3.1, p30 1109 * 1110 * Thus, search from the current collection upwards until we find a 1111 * logical collection. Then search all fields for that same parent 1112 * collection. Those are the fields the multiplier applies to. 1113 * 1114 * If we have more than one multiplier, it will overwrite the 1115 * applicable fields later. 
1116 */ 1117 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1118 while (multiplier_collection->parent_idx != -1 && 1119 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1120 multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; 1121 1122 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1123 1124 rep_enum = &hid->report_enum[HID_INPUT_REPORT]; 1125 list_for_each_entry(rep, &rep_enum->report_list, list) { 1126 for (i = 0; i < rep->maxfield; i++) { 1127 field = rep->field[i]; 1128 hid_apply_multiplier_to_field(hid, field, 1129 multiplier_collection, 1130 effective_multiplier); 1131 } 1132 } 1133 } 1134 1135 /* 1136 * hid_setup_resolution_multiplier - set up all resolution multipliers 1137 * 1138 * @device: hid device 1139 * 1140 * Search for all Resolution Multiplier Feature Reports and apply their 1141 * value to all matching Input items. This only updates the internal struct 1142 * fields. 1143 * 1144 * The Resolution Multiplier is applied by the hardware. If the multiplier 1145 * is anything other than 1, the hardware will send pre-multiplied events 1146 * so that the same physical interaction generates an accumulated 1147 * accumulated_value = value * * multiplier 1148 * This may be achieved by sending 1149 * - "value * multiplier" for each event, or 1150 * - "value" but "multiplier" times as frequently, or 1151 * - a combination of the above 1152 * The only guarantee is that the same physical interaction always generates 1153 * an accumulated 'value * multiplier'. 1154 * 1155 * This function must be called before any event processing and after 1156 * any SetRequest to the Resolution Multiplier. 1157 */ 1158 void hid_setup_resolution_multiplier(struct hid_device *hid) 1159 { 1160 struct hid_report_enum *rep_enum; 1161 struct hid_report *rep; 1162 struct hid_usage *usage; 1163 int i, j; 1164 1165 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1166 list_for_each_entry(rep, &rep_enum->report_list, list) { 1167 for (i = 0; i < rep->maxfield; i++) { 1168 /* Ignore if report count is out of bounds. */ 1169 if (rep->field[i]->report_count < 1) 1170 continue; 1171 1172 for (j = 0; j < rep->field[i]->maxusage; j++) { 1173 usage = &rep->field[i]->usage[j]; 1174 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) 1175 hid_apply_multiplier(hid, 1176 rep->field[i]); 1177 } 1178 } 1179 } 1180 } 1181 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier); 1182 1183 /** 1184 * hid_open_report - open a driver-specific device report 1185 * 1186 * @device: hid device 1187 * 1188 * Parse a report description into a hid_device structure. Reports are 1189 * enumerated, fields are attached to these reports. 1190 * 0 returned on success, otherwise nonzero error value. 1191 * 1192 * This function (or the equivalent hid_parse() macro) should only be 1193 * called from probe() in drivers, before starting the device. 
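 *
 * A typical probe() therefore looks roughly like (sketch):
 *
 *	ret = hid_parse(hdev);
 *	if (ret)
 *		return ret;
 *	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *
 * where hid_parse() is the inline wrapper around this function.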
1194 */ 1195 int hid_open_report(struct hid_device *device) 1196 { 1197 struct hid_parser *parser; 1198 struct hid_item item; 1199 unsigned int size; 1200 __u8 *start; 1201 __u8 *buf; 1202 __u8 *end; 1203 __u8 *next; 1204 int ret; 1205 static int (*dispatch_type[])(struct hid_parser *parser, 1206 struct hid_item *item) = { 1207 hid_parser_main, 1208 hid_parser_global, 1209 hid_parser_local, 1210 hid_parser_reserved 1211 }; 1212 1213 if (WARN_ON(device->status & HID_STAT_PARSED)) 1214 return -EBUSY; 1215 1216 start = device->dev_rdesc; 1217 if (WARN_ON(!start)) 1218 return -ENODEV; 1219 size = device->dev_rsize; 1220 1221 /* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */ 1222 buf = call_hid_bpf_rdesc_fixup(device, start, &size); 1223 if (buf == NULL) 1224 return -ENOMEM; 1225 1226 if (device->driver->report_fixup) 1227 start = device->driver->report_fixup(device, buf, &size); 1228 else 1229 start = buf; 1230 1231 start = kmemdup(start, size, GFP_KERNEL); 1232 kfree(buf); 1233 if (start == NULL) 1234 return -ENOMEM; 1235 1236 device->rdesc = start; 1237 device->rsize = size; 1238 1239 parser = vzalloc(sizeof(struct hid_parser)); 1240 if (!parser) { 1241 ret = -ENOMEM; 1242 goto alloc_err; 1243 } 1244 1245 parser->device = device; 1246 1247 end = start + size; 1248 1249 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS, 1250 sizeof(struct hid_collection), GFP_KERNEL); 1251 if (!device->collection) { 1252 ret = -ENOMEM; 1253 goto err; 1254 } 1255 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; 1256 1257 ret = -EINVAL; 1258 while ((next = fetch_item(start, end, &item)) != NULL) { 1259 start = next; 1260 1261 if (item.format != HID_ITEM_FORMAT_SHORT) { 1262 hid_err(device, "unexpected long global item\n"); 1263 goto err; 1264 } 1265 1266 if (dispatch_type[item.type](parser, &item)) { 1267 hid_err(device, "item %u %u %u %u parsing failed\n", 1268 item.format, (unsigned)item.size, 1269 (unsigned)item.type, (unsigned)item.tag); 1270 goto err; 1271 } 1272 1273 if (start == end) { 1274 if (parser->collection_stack_ptr) { 1275 hid_err(device, "unbalanced collection at end of report description\n"); 1276 goto err; 1277 } 1278 if (parser->local.delimiter_depth) { 1279 hid_err(device, "unbalanced delimiter at end of report description\n"); 1280 goto err; 1281 } 1282 1283 /* 1284 * fetch initial values in case the device's 1285 * default multiplier isn't the recommended 1 1286 */ 1287 hid_setup_resolution_multiplier(device); 1288 1289 kfree(parser->collection_stack); 1290 vfree(parser); 1291 device->status |= HID_STAT_PARSED; 1292 1293 return 0; 1294 } 1295 } 1296 1297 hid_err(device, "item fetching failed at offset %u/%u\n", 1298 size - (unsigned int)(end - start), size); 1299 err: 1300 kfree(parser->collection_stack); 1301 alloc_err: 1302 vfree(parser); 1303 hid_close_report(device); 1304 return ret; 1305 } 1306 EXPORT_SYMBOL_GPL(hid_open_report); 1307 1308 /* 1309 * Convert a signed n-bit integer to signed 32-bit integer. Common 1310 * cases are done through the compiler, the screwed things has to be 1311 * done by hand. 1312 */ 1313 1314 static s32 snto32(__u32 value, unsigned n) 1315 { 1316 if (!value || !n) 1317 return 0; 1318 1319 switch (n) { 1320 case 8: return ((__s8)value); 1321 case 16: return ((__s16)value); 1322 case 32: return ((__s32)value); 1323 } 1324 return value & (1 << (n - 1)) ? 
value | (~0U << n) : value; 1325 } 1326 1327 s32 hid_snto32(__u32 value, unsigned n) 1328 { 1329 return snto32(value, n); 1330 } 1331 EXPORT_SYMBOL_GPL(hid_snto32); 1332 1333 /* 1334 * Convert a signed 32-bit integer to a signed n-bit integer. 1335 */ 1336 1337 static u32 s32ton(__s32 value, unsigned n) 1338 { 1339 s32 a = value >> (n - 1); 1340 if (a && a != -1) 1341 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1; 1342 return value & ((1 << n) - 1); 1343 } 1344 1345 /* 1346 * Extract/implement a data field from/to a little endian report (bit array). 1347 * 1348 * Code sort-of follows HID spec: 1349 * http://www.usb.org/developers/hidpage/HID1_11.pdf 1350 * 1351 * While the USB HID spec allows unlimited length bit fields in "report 1352 * descriptors", most devices never use more than 16 bits. 1353 * One model of UPS is claimed to report "LINEV" as a 32-bit field. 1354 * Search linux-kernel and linux-usb-devel archives for "hid-core extract". 1355 */ 1356 1357 static u32 __extract(u8 *report, unsigned offset, int n) 1358 { 1359 unsigned int idx = offset / 8; 1360 unsigned int bit_nr = 0; 1361 unsigned int bit_shift = offset % 8; 1362 int bits_to_copy = 8 - bit_shift; 1363 u32 value = 0; 1364 u32 mask = n < 32 ? (1U << n) - 1 : ~0U; 1365 1366 while (n > 0) { 1367 value |= ((u32)report[idx] >> bit_shift) << bit_nr; 1368 n -= bits_to_copy; 1369 bit_nr += bits_to_copy; 1370 bits_to_copy = 8; 1371 bit_shift = 0; 1372 idx++; 1373 } 1374 1375 return value & mask; 1376 } 1377 1378 u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1379 unsigned offset, unsigned n) 1380 { 1381 if (n > 32) { 1382 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n", 1383 __func__, n, current->comm); 1384 n = 32; 1385 } 1386 1387 return __extract(report, offset, n); 1388 } 1389 EXPORT_SYMBOL_GPL(hid_field_extract); 1390 1391 /* 1392 * "implement" : set bits in a little endian bit stream. 1393 * Same concepts as "extract" (see comments above). 1394 * The data mangled in the bit stream remains in little endian 1395 * order the whole time. It make more sense to talk about 1396 * endianness of register values by considering a register 1397 * a "cached" copy of the little endian bit stream. 1398 */ 1399 1400 static void __implement(u8 *report, unsigned offset, int n, u32 value) 1401 { 1402 unsigned int idx = offset / 8; 1403 unsigned int bit_shift = offset % 8; 1404 int bits_to_set = 8 - bit_shift; 1405 1406 while (n - bits_to_set >= 0) { 1407 report[idx] &= ~(0xff << bit_shift); 1408 report[idx] |= value << bit_shift; 1409 value >>= bits_to_set; 1410 n -= bits_to_set; 1411 bits_to_set = 8; 1412 bit_shift = 0; 1413 idx++; 1414 } 1415 1416 /* last nibble */ 1417 if (n) { 1418 u8 bit_mask = ((1U << n) - 1); 1419 report[idx] &= ~(bit_mask << bit_shift); 1420 report[idx] |= value << bit_shift; 1421 } 1422 } 1423 1424 static void implement(const struct hid_device *hid, u8 *report, 1425 unsigned offset, unsigned n, u32 value) 1426 { 1427 if (unlikely(n > 32)) { 1428 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n", 1429 __func__, n, current->comm); 1430 n = 32; 1431 } else if (n < 32) { 1432 u32 m = (1U << n) - 1; 1433 1434 if (unlikely(value > m)) { 1435 hid_warn(hid, 1436 "%s() called with too large value %d (n: %d)! (%s)\n", 1437 __func__, value, n, current->comm); 1438 WARN_ON(1); 1439 value &= m; 1440 } 1441 } 1442 1443 __implement(report, offset, n, value); 1444 } 1445 1446 /* 1447 * Search an array for a value. 
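 *
 * Note the inverted return convention: 0 means the value was found,
 * -1 that it was not. hid_input_array_field() relies on this to emit
 * events only for array usages that appeared in, or disappeared from,
 * the report since the previous one.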
1448 */ 1449 1450 static int search(__s32 *array, __s32 value, unsigned n) 1451 { 1452 while (n--) { 1453 if (*array++ == value) 1454 return 0; 1455 } 1456 return -1; 1457 } 1458 1459 /** 1460 * hid_match_report - check if driver's raw_event should be called 1461 * 1462 * @hid: hid device 1463 * @report: hid report to match against 1464 * 1465 * compare hid->driver->report_table->report_type to report->type 1466 */ 1467 static int hid_match_report(struct hid_device *hid, struct hid_report *report) 1468 { 1469 const struct hid_report_id *id = hid->driver->report_table; 1470 1471 if (!id) /* NULL means all */ 1472 return 1; 1473 1474 for (; id->report_type != HID_TERMINATOR; id++) 1475 if (id->report_type == HID_ANY_ID || 1476 id->report_type == report->type) 1477 return 1; 1478 return 0; 1479 } 1480 1481 /** 1482 * hid_match_usage - check if driver's event should be called 1483 * 1484 * @hid: hid device 1485 * @usage: usage to match against 1486 * 1487 * compare hid->driver->usage_table->usage_{type,code} to 1488 * usage->usage_{type,code} 1489 */ 1490 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage) 1491 { 1492 const struct hid_usage_id *id = hid->driver->usage_table; 1493 1494 if (!id) /* NULL means all */ 1495 return 1; 1496 1497 for (; id->usage_type != HID_ANY_ID - 1; id++) 1498 if ((id->usage_hid == HID_ANY_ID || 1499 id->usage_hid == usage->hid) && 1500 (id->usage_type == HID_ANY_ID || 1501 id->usage_type == usage->type) && 1502 (id->usage_code == HID_ANY_ID || 1503 id->usage_code == usage->code)) 1504 return 1; 1505 return 0; 1506 } 1507 1508 static void hid_process_event(struct hid_device *hid, struct hid_field *field, 1509 struct hid_usage *usage, __s32 value, int interrupt) 1510 { 1511 struct hid_driver *hdrv = hid->driver; 1512 int ret; 1513 1514 if (!list_empty(&hid->debug_list)) 1515 hid_dump_input(hid, usage, value); 1516 1517 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1518 ret = hdrv->event(hid, field, usage, value); 1519 if (ret != 0) { 1520 if (ret < 0) 1521 hid_err(hid, "%s's event failed with %d\n", 1522 hdrv->name, ret); 1523 return; 1524 } 1525 } 1526 1527 if (hid->claimed & HID_CLAIMED_INPUT) 1528 hidinput_hid_event(hid, field, usage, value); 1529 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event) 1530 hid->hiddev_hid_event(hid, field, usage, value); 1531 } 1532 1533 /* 1534 * Checks if the given value is valid within this field 1535 */ 1536 static inline int hid_array_value_is_valid(struct hid_field *field, 1537 __s32 value) 1538 { 1539 __s32 min = field->logical_minimum; 1540 1541 /* 1542 * Value needs to be between logical min and max, and 1543 * (value - min) is used as an index in the usage array. 1544 * This array is of size field->maxusage 1545 */ 1546 return value >= min && 1547 value <= field->logical_maximum && 1548 value - min < field->maxusage; 1549 } 1550 1551 /* 1552 * Fetch the field from the data. The field content is stored for next 1553 * report processing (we do differential reporting to the layer). 
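 *
 * For instance, a field with report_offset 8, report_size 8 and
 * report_count 2 is read from data[1] and data[2]; if logical_minimum is
 * negative, every extracted value is additionally sign-extended through
 * snto32(), so an 8-bit 0xFF becomes -1.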
1554 */ 1555 static void hid_input_fetch_field(struct hid_device *hid, 1556 struct hid_field *field, 1557 __u8 *data) 1558 { 1559 unsigned n; 1560 unsigned count = field->report_count; 1561 unsigned offset = field->report_offset; 1562 unsigned size = field->report_size; 1563 __s32 min = field->logical_minimum; 1564 __s32 *value; 1565 1566 value = field->new_value; 1567 memset(value, 0, count * sizeof(__s32)); 1568 field->ignored = false; 1569 1570 for (n = 0; n < count; n++) { 1571 1572 value[n] = min < 0 ? 1573 snto32(hid_field_extract(hid, data, offset + n * size, 1574 size), size) : 1575 hid_field_extract(hid, data, offset + n * size, size); 1576 1577 /* Ignore report if ErrorRollOver */ 1578 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && 1579 hid_array_value_is_valid(field, value[n]) && 1580 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) { 1581 field->ignored = true; 1582 return; 1583 } 1584 } 1585 } 1586 1587 /* 1588 * Process a received variable field. 1589 */ 1590 1591 static void hid_input_var_field(struct hid_device *hid, 1592 struct hid_field *field, 1593 int interrupt) 1594 { 1595 unsigned int count = field->report_count; 1596 __s32 *value = field->new_value; 1597 unsigned int n; 1598 1599 for (n = 0; n < count; n++) 1600 hid_process_event(hid, 1601 field, 1602 &field->usage[n], 1603 value[n], 1604 interrupt); 1605 1606 memcpy(field->value, value, count * sizeof(__s32)); 1607 } 1608 1609 /* 1610 * Process a received array field. The field content is stored for 1611 * next report processing (we do differential reporting to the layer). 1612 */ 1613 1614 static void hid_input_array_field(struct hid_device *hid, 1615 struct hid_field *field, 1616 int interrupt) 1617 { 1618 unsigned int n; 1619 unsigned int count = field->report_count; 1620 __s32 min = field->logical_minimum; 1621 __s32 *value; 1622 1623 value = field->new_value; 1624 1625 /* ErrorRollOver */ 1626 if (field->ignored) 1627 return; 1628 1629 for (n = 0; n < count; n++) { 1630 if (hid_array_value_is_valid(field, field->value[n]) && 1631 search(value, field->value[n], count)) 1632 hid_process_event(hid, 1633 field, 1634 &field->usage[field->value[n] - min], 1635 0, 1636 interrupt); 1637 1638 if (hid_array_value_is_valid(field, value[n]) && 1639 search(field->value, value[n], count)) 1640 hid_process_event(hid, 1641 field, 1642 &field->usage[value[n] - min], 1643 1, 1644 interrupt); 1645 } 1646 1647 memcpy(field->value, value, count * sizeof(__s32)); 1648 } 1649 1650 /* 1651 * Analyse a received report, and fetch the data from it. The field 1652 * content is stored for next report processing (we do differential 1653 * reporting to the layer). 
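 *
 * Input reports are walked through report->field_entry_list, so that
 * individual usages are delivered in the priority order prepared by
 * hid_process_ordering() at connect time; reports without such a list
 * (e.g. feature reports read back with GET_REPORT) are simply processed
 * field by field.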
1654 */ 1655 static void hid_process_report(struct hid_device *hid, 1656 struct hid_report *report, 1657 __u8 *data, 1658 int interrupt) 1659 { 1660 unsigned int a; 1661 struct hid_field_entry *entry; 1662 struct hid_field *field; 1663 1664 /* first retrieve all incoming values in data */ 1665 for (a = 0; a < report->maxfield; a++) 1666 hid_input_fetch_field(hid, report->field[a], data); 1667 1668 if (!list_empty(&report->field_entry_list)) { 1669 /* INPUT_REPORT, we have a priority list of fields */ 1670 list_for_each_entry(entry, 1671 &report->field_entry_list, 1672 list) { 1673 field = entry->field; 1674 1675 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1676 hid_process_event(hid, 1677 field, 1678 &field->usage[entry->index], 1679 field->new_value[entry->index], 1680 interrupt); 1681 else 1682 hid_input_array_field(hid, field, interrupt); 1683 } 1684 1685 /* we need to do the memcpy at the end for var items */ 1686 for (a = 0; a < report->maxfield; a++) { 1687 field = report->field[a]; 1688 1689 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1690 memcpy(field->value, field->new_value, 1691 field->report_count * sizeof(__s32)); 1692 } 1693 } else { 1694 /* FEATURE_REPORT, regular processing */ 1695 for (a = 0; a < report->maxfield; a++) { 1696 field = report->field[a]; 1697 1698 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1699 hid_input_var_field(hid, field, interrupt); 1700 else 1701 hid_input_array_field(hid, field, interrupt); 1702 } 1703 } 1704 } 1705 1706 /* 1707 * Insert a given usage_index in a field in the list 1708 * of processed usages in the report. 1709 * 1710 * The elements of lower priority score are processed 1711 * first. 1712 */ 1713 static void __hid_insert_field_entry(struct hid_device *hid, 1714 struct hid_report *report, 1715 struct hid_field_entry *entry, 1716 struct hid_field *field, 1717 unsigned int usage_index) 1718 { 1719 struct hid_field_entry *next; 1720 1721 entry->field = field; 1722 entry->index = usage_index; 1723 entry->priority = field->usages_priorities[usage_index]; 1724 1725 /* insert the element at the correct position */ 1726 list_for_each_entry(next, 1727 &report->field_entry_list, 1728 list) { 1729 /* 1730 * the priority of our element is strictly higher 1731 * than the next one, insert it before 1732 */ 1733 if (entry->priority > next->priority) { 1734 list_add_tail(&entry->list, &next->list); 1735 return; 1736 } 1737 } 1738 1739 /* lowest priority score: insert at the end */ 1740 list_add_tail(&entry->list, &report->field_entry_list); 1741 } 1742 1743 static void hid_report_process_ordering(struct hid_device *hid, 1744 struct hid_report *report) 1745 { 1746 struct hid_field *field; 1747 struct hid_field_entry *entries; 1748 unsigned int a, u, usages; 1749 unsigned int count = 0; 1750 1751 /* count the number of individual fields in the report */ 1752 for (a = 0; a < report->maxfield; a++) { 1753 field = report->field[a]; 1754 1755 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1756 count += field->report_count; 1757 else 1758 count++; 1759 } 1760 1761 /* allocate the memory to process the fields */ 1762 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); 1763 if (!entries) 1764 return; 1765 1766 report->field_entries = entries; 1767 1768 /* 1769 * walk through all fields in the report and 1770 * store them by priority order in report->field_entry_list 1771 * 1772 * - Var elements are individualized (field + usage_index) 1773 * - Arrays are taken as one, we can not chose an order for them 1774 */ 1775 usages = 0; 1776 for (a = 0; a < report->maxfield; 
a++) { 1777 field = report->field[a]; 1778 1779 if (field->flags & HID_MAIN_ITEM_VARIABLE) { 1780 for (u = 0; u < field->report_count; u++) { 1781 __hid_insert_field_entry(hid, report, 1782 &entries[usages], 1783 field, u); 1784 usages++; 1785 } 1786 } else { 1787 __hid_insert_field_entry(hid, report, &entries[usages], 1788 field, 0); 1789 usages++; 1790 } 1791 } 1792 } 1793 1794 static void hid_process_ordering(struct hid_device *hid) 1795 { 1796 struct hid_report *report; 1797 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT]; 1798 1799 list_for_each_entry(report, &report_enum->report_list, list) 1800 hid_report_process_ordering(hid, report); 1801 } 1802 1803 /* 1804 * Output the field into the report. 1805 */ 1806 1807 static void hid_output_field(const struct hid_device *hid, 1808 struct hid_field *field, __u8 *data) 1809 { 1810 unsigned count = field->report_count; 1811 unsigned offset = field->report_offset; 1812 unsigned size = field->report_size; 1813 unsigned n; 1814 1815 for (n = 0; n < count; n++) { 1816 if (field->logical_minimum < 0) /* signed values */ 1817 implement(hid, data, offset + n * size, size, 1818 s32ton(field->value[n], size)); 1819 else /* unsigned values */ 1820 implement(hid, data, offset + n * size, size, 1821 field->value[n]); 1822 } 1823 } 1824 1825 /* 1826 * Compute the size of a report. 1827 */ 1828 static size_t hid_compute_report_size(struct hid_report *report) 1829 { 1830 if (report->size) 1831 return ((report->size - 1) >> 3) + 1; 1832 1833 return 0; 1834 } 1835 1836 /* 1837 * Create a report. 'data' has to be allocated using 1838 * hid_alloc_report_buf() so that it has proper size. 1839 */ 1840 1841 void hid_output_report(struct hid_report *report, __u8 *data) 1842 { 1843 unsigned n; 1844 1845 if (report->id > 0) 1846 *data++ = report->id; 1847 1848 memset(data, 0, hid_compute_report_size(report)); 1849 for (n = 0; n < report->maxfield; n++) 1850 hid_output_field(report->device, report->field[n], data); 1851 } 1852 EXPORT_SYMBOL_GPL(hid_output_report); 1853 1854 /* 1855 * Allocator for buffer that is going to be passed to hid_output_report() 1856 */ 1857 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) 1858 { 1859 /* 1860 * 7 extra bytes are necessary to achieve proper functionality 1861 * of implement() working on 8 byte chunks 1862 */ 1863 1864 u32 len = hid_report_len(report) + 7; 1865 1866 return kmalloc(len, flags); 1867 } 1868 EXPORT_SYMBOL_GPL(hid_alloc_report_buf); 1869 1870 /* 1871 * Set a field value. The report this field belongs to has to be 1872 * created and transferred to the device, to set this value in the 1873 * device. 
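 *
 * A driver usually pairs it with hid_hw_request(), e.g. (sketch, assuming
 * the report layout of the device at hand):
 *
 *	hid_set_field(report->field[0], 0, value);
 *	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);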
1874 */ 1875 1876 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) 1877 { 1878 unsigned size; 1879 1880 if (!field) 1881 return -1; 1882 1883 size = field->report_size; 1884 1885 hid_dump_input(field->report->device, field->usage + offset, value); 1886 1887 if (offset >= field->report_count) { 1888 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n", 1889 offset, field->report_count); 1890 return -1; 1891 } 1892 if (field->logical_minimum < 0) { 1893 if (value != snto32(s32ton(value, size), size)) { 1894 hid_err(field->report->device, "value %d is out of range\n", value); 1895 return -1; 1896 } 1897 } 1898 field->value[offset] = value; 1899 return 0; 1900 } 1901 EXPORT_SYMBOL_GPL(hid_set_field); 1902 1903 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, 1904 const u8 *data) 1905 { 1906 struct hid_report *report; 1907 unsigned int n = 0; /* Normally report number is 0 */ 1908 1909 /* Device uses numbered reports, data[0] is report number */ 1910 if (report_enum->numbered) 1911 n = *data; 1912 1913 report = report_enum->report_id_hash[n]; 1914 if (report == NULL) 1915 dbg_hid("undefined report_id %u received\n", n); 1916 1917 return report; 1918 } 1919 1920 /* 1921 * Implement a generic .request() callback, using .raw_request() 1922 * DO NOT USE in hid drivers directly, but through hid_hw_request instead. 1923 */ 1924 int __hid_request(struct hid_device *hid, struct hid_report *report, 1925 enum hid_class_request reqtype) 1926 { 1927 char *buf; 1928 int ret; 1929 u32 len; 1930 1931 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1932 if (!buf) 1933 return -ENOMEM; 1934 1935 len = hid_report_len(report); 1936 1937 if (reqtype == HID_REQ_SET_REPORT) 1938 hid_output_report(report, buf); 1939 1940 ret = hid->ll_driver->raw_request(hid, report->id, buf, len, 1941 report->type, reqtype); 1942 if (ret < 0) { 1943 dbg_hid("unable to complete request: %d\n", ret); 1944 goto out; 1945 } 1946 1947 if (reqtype == HID_REQ_GET_REPORT) 1948 hid_input_report(hid, report->type, buf, ret, 0); 1949 1950 ret = 0; 1951 1952 out: 1953 kfree(buf); 1954 return ret; 1955 } 1956 EXPORT_SYMBOL_GPL(__hid_request); 1957 1958 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 1959 int interrupt) 1960 { 1961 struct hid_report_enum *report_enum = hid->report_enum + type; 1962 struct hid_report *report; 1963 struct hid_driver *hdrv; 1964 u32 rsize, csize = size; 1965 u8 *cdata = data; 1966 int ret = 0; 1967 1968 report = hid_get_report(report_enum, data); 1969 if (!report) 1970 goto out; 1971 1972 if (report_enum->numbered) { 1973 cdata++; 1974 csize--; 1975 } 1976 1977 rsize = hid_compute_report_size(report); 1978 1979 if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) 1980 rsize = HID_MAX_BUFFER_SIZE - 1; 1981 else if (rsize > HID_MAX_BUFFER_SIZE) 1982 rsize = HID_MAX_BUFFER_SIZE; 1983 1984 if (csize < rsize) { 1985 dbg_hid("report %d is too short, (%d < %d)\n", report->id, 1986 csize, rsize); 1987 memset(cdata + csize, 0, rsize - csize); 1988 } 1989 1990 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 1991 hid->hiddev_report_event(hid, report); 1992 if (hid->claimed & HID_CLAIMED_HIDRAW) { 1993 ret = hidraw_report_event(hid, data, size); 1994 if (ret) 1995 goto out; 1996 } 1997 1998 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 1999 hid_process_report(hid, report, cdata, interrupt); 2000 hdrv = hid->driver; 2001 if (hdrv && hdrv->report) 2002 hdrv->report(hid, 
report); 2003 } 2004 2005 if (hid->claimed & HID_CLAIMED_INPUT) 2006 hidinput_report_event(hid, report); 2007 out: 2008 return ret; 2009 } 2010 EXPORT_SYMBOL_GPL(hid_report_raw_event); 2011 2012 /** 2013 * hid_input_report - report data from lower layer (usb, bt...) 2014 * 2015 * @hid: hid device 2016 * @type: HID report type (HID_*_REPORT) 2017 * @data: report contents 2018 * @size: size of data parameter 2019 * @interrupt: distinguish between interrupt and control transfers 2020 * 2021 * This is data entry for lower layers. 2022 */ 2023 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2024 int interrupt) 2025 { 2026 struct hid_report_enum *report_enum; 2027 struct hid_driver *hdrv; 2028 struct hid_report *report; 2029 int ret = 0; 2030 2031 if (!hid) 2032 return -ENODEV; 2033 2034 if (down_trylock(&hid->driver_input_lock)) 2035 return -EBUSY; 2036 2037 if (!hid->driver) { 2038 ret = -ENODEV; 2039 goto unlock; 2040 } 2041 report_enum = hid->report_enum + type; 2042 hdrv = hid->driver; 2043 2044 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt); 2045 if (IS_ERR(data)) { 2046 ret = PTR_ERR(data); 2047 goto unlock; 2048 } 2049 2050 if (!size) { 2051 dbg_hid("empty report\n"); 2052 ret = -1; 2053 goto unlock; 2054 } 2055 2056 /* Avoid unnecessary overhead if debugfs is disabled */ 2057 if (!list_empty(&hid->debug_list)) 2058 hid_dump_report(hid, type, data, size); 2059 2060 report = hid_get_report(report_enum, data); 2061 2062 if (!report) { 2063 ret = -1; 2064 goto unlock; 2065 } 2066 2067 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { 2068 ret = hdrv->raw_event(hid, report, data, size); 2069 if (ret < 0) 2070 goto unlock; 2071 } 2072 2073 ret = hid_report_raw_event(hid, type, data, size, interrupt); 2074 2075 unlock: 2076 up(&hid->driver_input_lock); 2077 return ret; 2078 } 2079 EXPORT_SYMBOL_GPL(hid_input_report); 2080 2081 bool hid_match_one_id(const struct hid_device *hdev, 2082 const struct hid_device_id *id) 2083 { 2084 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && 2085 (id->group == HID_GROUP_ANY || id->group == hdev->group) && 2086 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && 2087 (id->product == HID_ANY_ID || id->product == hdev->product); 2088 } 2089 2090 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, 2091 const struct hid_device_id *id) 2092 { 2093 for (; id->bus; id++) 2094 if (hid_match_one_id(hdev, id)) 2095 return id; 2096 2097 return NULL; 2098 } 2099 EXPORT_SYMBOL_GPL(hid_match_id); 2100 2101 static const struct hid_device_id hid_hiddev_list[] = { 2102 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) }, 2103 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) }, 2104 { } 2105 }; 2106 2107 static bool hid_hiddev(struct hid_device *hdev) 2108 { 2109 return !!hid_match_id(hdev, hid_hiddev_list); 2110 } 2111 2112 2113 static ssize_t 2114 read_report_descriptor(struct file *filp, struct kobject *kobj, 2115 struct bin_attribute *attr, 2116 char *buf, loff_t off, size_t count) 2117 { 2118 struct device *dev = kobj_to_dev(kobj); 2119 struct hid_device *hdev = to_hid_device(dev); 2120 2121 if (off >= hdev->rsize) 2122 return 0; 2123 2124 if (off + count > hdev->rsize) 2125 count = hdev->rsize - off; 2126 2127 memcpy(buf, hdev->rdesc + off, count); 2128 2129 return count; 2130 } 2131 2132 static ssize_t 2133 show_country(struct device *dev, struct device_attribute *attr, 2134 char *buf) 2135 { 2136 struct hid_device *hdev = 
to_hid_device(dev);

	return sprintf(buf, "%02x\n", hdev->country & 0xff);
}

static struct bin_attribute dev_bin_attr_report_desc = {
	.attr = { .name = "report_descriptor", .mode = 0444 },
	.read = read_report_descriptor,
	.size = HID_MAX_DESCRIPTOR_SIZE,
};

static const struct device_attribute dev_attr_country = {
	.attr = { .name = "country", .mode = 0444 },
	.show = show_country,
};

int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
		"Joystick", "Gamepad", "Keyboard", "Keypad",
		"Multi-Axis Controller"
	};
	const char *type, *bus;
	char buf[64] = "";
	unsigned int i;
	int len;
	int ret;

	ret = hid_bpf_connect_device(hdev);
	if (ret)
		return ret;

	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
	if (hdev->bus != BUS_USB)
		connect_mask &= ~HID_CONNECT_HIDDEV;
	if (hid_hiddev(hdev))
		connect_mask |= HID_CONNECT_HIDDEV_FORCE;

	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
		hdev->claimed |= HID_CLAIMED_INPUT;

	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
			!hdev->hiddev_connect(hdev,
				connect_mask & HID_CONNECT_HIDDEV_FORCE))
		hdev->claimed |= HID_CLAIMED_HIDDEV;
	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
		hdev->claimed |= HID_CLAIMED_HIDRAW;

	if (connect_mask & HID_CONNECT_DRIVER)
		hdev->claimed |= HID_CLAIMED_DRIVER;

	/* Drivers with the ->raw_event callback set are not required to connect
	 * to any other listener. */
	if (!hdev->claimed && !hdev->driver->raw_event) {
		hid_err(hdev, "device has no listeners, quitting\n");
		return -ENODEV;
	}

	hid_process_ordering(hdev);

	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
		hdev->ff_init(hdev);

	len = 0;
	if (hdev->claimed & HID_CLAIMED_INPUT)
		len += sprintf(buf + len, "input");
	if (hdev->claimed & HID_CLAIMED_HIDDEV)
		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
				((struct hiddev *)hdev->hiddev)->minor);
	if (hdev->claimed & HID_CLAIMED_HIDRAW)
		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
				((struct hidraw *)hdev->hidraw)->minor);

	type = "Device";
	for (i = 0; i < hdev->maxcollection; i++) {
		struct hid_collection *col = &hdev->collection[i];
		if (col->type == HID_COLLECTION_APPLICATION &&
		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
			type = types[col->usage & 0xffff];
			break;
		}
	}

	switch (hdev->bus) {
	case BUS_USB:
		bus = "USB";
		break;
	case BUS_BLUETOOTH:
		bus = "BLUETOOTH";
		break;
	case BUS_I2C:
		bus = "I2C";
		break;
	case BUS_VIRTUAL:
		bus = "VIRTUAL";
		break;
	case BUS_INTEL_ISHTP:
	case BUS_AMD_SFH:
		bus = "SENSOR HUB";
		break;
	default:
		bus = "<UNKNOWN>";
	}

	ret = device_create_file(&hdev->dev, &dev_attr_country);
	if (ret)
		hid_warn(hdev,
			 "can't create sysfs country code attribute err: %d\n", ret);

	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
		 type, hdev->name, hdev->phys);

	return 0;
}
EXPORT_SYMBOL_GPL(hid_connect);

void hid_disconnect(struct hid_device *hdev)
{
	device_remove_file(&hdev->dev, &dev_attr_country);
	if (hdev->claimed & HID_CLAIMED_INPUT)
		hidinput_disconnect(hdev);
	if (hdev->claimed & HID_CLAIMED_HIDDEV)
		hdev->hiddev_disconnect(hdev);
	if (hdev->claimed & HID_CLAIMED_HIDRAW)
		hidraw_disconnect(hdev);
	hdev->claimed = 0;

	hid_bpf_disconnect_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_disconnect);

/**
 * hid_hw_start - start underlying HW
 * @hdev: hid device
 * @connect_mask: which outputs to connect, see HID_CONNECT_*
 *
 * Call this in probe function *after* hid_parse. This will set up HW
 * buffers and start the device (if not deferred to device open).
 * hid_hw_stop must be called if this was successful.
 */
int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
{
	int error;

	error = hdev->ll_driver->start(hdev);
	if (error)
		return error;

	if (connect_mask) {
		error = hid_connect(hdev, connect_mask);
		if (error) {
			hdev->ll_driver->stop(hdev);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hid_hw_start);

/**
 * hid_hw_stop - stop underlying HW
 * @hdev: hid device
 *
 * This is usually called from the remove function, or from probe when
 * something failed and hid_hw_start was already called.
 */
void hid_hw_stop(struct hid_device *hdev)
{
	hid_disconnect(hdev);
	hdev->ll_driver->stop(hdev);
}
EXPORT_SYMBOL_GPL(hid_hw_stop);

/**
 * hid_hw_open - signal underlying HW to start delivering events
 * @hdev: hid device
 *
 * Tell underlying HW to start delivering events from the device.
 * This function should be called sometime after a successful call
 * to hid_hw_start().
 */
int hid_hw_open(struct hid_device *hdev)
{
	int ret;

	ret = mutex_lock_killable(&hdev->ll_open_lock);
	if (ret)
		return ret;

	if (!hdev->ll_open_count++) {
		ret = hdev->ll_driver->open(hdev);
		if (ret)
			hdev->ll_open_count--;
	}

	mutex_unlock(&hdev->ll_open_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_hw_open);

/**
 * hid_hw_close - signal underlying HW to stop delivering events
 *
 * @hdev: hid device
 *
 * This function indicates that we are not interested in the events
 * from this device anymore. Delivery of events may or may not stop,
 * depending on the number of users still outstanding.
 */
void hid_hw_close(struct hid_device *hdev)
{
	mutex_lock(&hdev->ll_open_lock);
	if (!--hdev->ll_open_count)
		hdev->ll_driver->close(hdev);
	mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);

/**
 * hid_hw_request - send report request to device
 *
 * @hdev: hid device
 * @report: report to send
 * @reqtype: hid request type
 */
void hid_hw_request(struct hid_device *hdev,
		    struct hid_report *report, enum hid_class_request reqtype)
{
	if (hdev->ll_driver->request)
		return hdev->ll_driver->request(hdev, report, reqtype);

	__hid_request(hdev, report, reqtype);
}
EXPORT_SYMBOL_GPL(hid_hw_request);

/**
 * hid_hw_raw_request - send report request to device
 *
 * @hdev: hid device
 * @reportnum: report ID
 * @buf: in/out data to transfer
 * @len: length of buf
 * @rtype: HID report type
 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
 *
 * Return: count of data transferred, negative if error
 *
 * Same behavior as hid_hw_request, but with raw buffers instead.
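 *
 * A minimal calling sketch for reading a feature report; report_id, len and
 * the surrounding error handling are the caller's, and the names below are
 * illustrative only, not taken from this file. Callers conventionally pass a
 * kmalloc'ed buffer rather than stack memory. On success, the first ret bytes
 * of buf hold the returned report:
 *
 *	u8 *buf = kzalloc(len, GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_raw_request(hdev, report_id, buf, len,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	if (ret > 0)
 *		process(buf, ret);
 *	kfree(buf);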
2392 */ 2393 int hid_hw_raw_request(struct hid_device *hdev, 2394 unsigned char reportnum, __u8 *buf, 2395 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype) 2396 { 2397 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) 2398 return -EINVAL; 2399 2400 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len, 2401 rtype, reqtype); 2402 } 2403 EXPORT_SYMBOL_GPL(hid_hw_raw_request); 2404 2405 /** 2406 * hid_hw_output_report - send output report to device 2407 * 2408 * @hdev: hid device 2409 * @buf: raw data to transfer 2410 * @len: length of buf 2411 * 2412 * Return: count of data transferred, negative if error 2413 */ 2414 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) 2415 { 2416 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) 2417 return -EINVAL; 2418 2419 if (hdev->ll_driver->output_report) 2420 return hdev->ll_driver->output_report(hdev, buf, len); 2421 2422 return -ENOSYS; 2423 } 2424 EXPORT_SYMBOL_GPL(hid_hw_output_report); 2425 2426 #ifdef CONFIG_PM 2427 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) 2428 { 2429 if (hdev->driver && hdev->driver->suspend) 2430 return hdev->driver->suspend(hdev, state); 2431 2432 return 0; 2433 } 2434 EXPORT_SYMBOL_GPL(hid_driver_suspend); 2435 2436 int hid_driver_reset_resume(struct hid_device *hdev) 2437 { 2438 if (hdev->driver && hdev->driver->reset_resume) 2439 return hdev->driver->reset_resume(hdev); 2440 2441 return 0; 2442 } 2443 EXPORT_SYMBOL_GPL(hid_driver_reset_resume); 2444 2445 int hid_driver_resume(struct hid_device *hdev) 2446 { 2447 if (hdev->driver && hdev->driver->resume) 2448 return hdev->driver->resume(hdev); 2449 2450 return 0; 2451 } 2452 EXPORT_SYMBOL_GPL(hid_driver_resume); 2453 #endif /* CONFIG_PM */ 2454 2455 struct hid_dynid { 2456 struct list_head list; 2457 struct hid_device_id id; 2458 }; 2459 2460 /** 2461 * new_id_store - add a new HID device ID to this driver and re-probe devices 2462 * @drv: target device driver 2463 * @buf: buffer for scanning device ID data 2464 * @count: input size 2465 * 2466 * Adds a new dynamic hid device ID to this driver, 2467 * and causes the driver to probe for all devices again. 2468 */ 2469 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 2470 size_t count) 2471 { 2472 struct hid_driver *hdrv = to_hid_driver(drv); 2473 struct hid_dynid *dynid; 2474 __u32 bus, vendor, product; 2475 unsigned long driver_data = 0; 2476 int ret; 2477 2478 ret = sscanf(buf, "%x %x %x %lx", 2479 &bus, &vendor, &product, &driver_data); 2480 if (ret < 3) 2481 return -EINVAL; 2482 2483 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 2484 if (!dynid) 2485 return -ENOMEM; 2486 2487 dynid->id.bus = bus; 2488 dynid->id.group = HID_GROUP_ANY; 2489 dynid->id.vendor = vendor; 2490 dynid->id.product = product; 2491 dynid->id.driver_data = driver_data; 2492 2493 spin_lock(&hdrv->dyn_lock); 2494 list_add_tail(&dynid->list, &hdrv->dyn_list); 2495 spin_unlock(&hdrv->dyn_lock); 2496 2497 ret = driver_attach(&hdrv->driver); 2498 2499 return ret ? 
: count; 2500 } 2501 static DRIVER_ATTR_WO(new_id); 2502 2503 static struct attribute *hid_drv_attrs[] = { 2504 &driver_attr_new_id.attr, 2505 NULL, 2506 }; 2507 ATTRIBUTE_GROUPS(hid_drv); 2508 2509 static void hid_free_dynids(struct hid_driver *hdrv) 2510 { 2511 struct hid_dynid *dynid, *n; 2512 2513 spin_lock(&hdrv->dyn_lock); 2514 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { 2515 list_del(&dynid->list); 2516 kfree(dynid); 2517 } 2518 spin_unlock(&hdrv->dyn_lock); 2519 } 2520 2521 const struct hid_device_id *hid_match_device(struct hid_device *hdev, 2522 struct hid_driver *hdrv) 2523 { 2524 struct hid_dynid *dynid; 2525 2526 spin_lock(&hdrv->dyn_lock); 2527 list_for_each_entry(dynid, &hdrv->dyn_list, list) { 2528 if (hid_match_one_id(hdev, &dynid->id)) { 2529 spin_unlock(&hdrv->dyn_lock); 2530 return &dynid->id; 2531 } 2532 } 2533 spin_unlock(&hdrv->dyn_lock); 2534 2535 return hid_match_id(hdev, hdrv->id_table); 2536 } 2537 EXPORT_SYMBOL_GPL(hid_match_device); 2538 2539 static int hid_bus_match(struct device *dev, struct device_driver *drv) 2540 { 2541 struct hid_driver *hdrv = to_hid_driver(drv); 2542 struct hid_device *hdev = to_hid_device(dev); 2543 2544 return hid_match_device(hdev, hdrv) != NULL; 2545 } 2546 2547 /** 2548 * hid_compare_device_paths - check if both devices share the same path 2549 * @hdev_a: hid device 2550 * @hdev_b: hid device 2551 * @separator: char to use as separator 2552 * 2553 * Check if two devices share the same path up to the last occurrence of 2554 * the separator char. Both paths must exist (i.e., zero-length paths 2555 * don't match). 2556 */ 2557 bool hid_compare_device_paths(struct hid_device *hdev_a, 2558 struct hid_device *hdev_b, char separator) 2559 { 2560 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys; 2561 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys; 2562 2563 if (n1 != n2 || n1 <= 0 || n2 <= 0) 2564 return false; 2565 2566 return !strncmp(hdev_a->phys, hdev_b->phys, n1); 2567 } 2568 EXPORT_SYMBOL_GPL(hid_compare_device_paths); 2569 2570 static int hid_device_probe(struct device *dev) 2571 { 2572 struct hid_driver *hdrv = to_hid_driver(dev->driver); 2573 struct hid_device *hdev = to_hid_device(dev); 2574 const struct hid_device_id *id; 2575 int ret = 0; 2576 2577 if (down_interruptible(&hdev->driver_input_lock)) { 2578 ret = -EINTR; 2579 goto end; 2580 } 2581 hdev->io_started = false; 2582 2583 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); 2584 2585 if (!hdev->driver) { 2586 id = hid_match_device(hdev, hdrv); 2587 if (id == NULL) { 2588 ret = -ENODEV; 2589 goto unlock; 2590 } 2591 2592 if (hdrv->match) { 2593 if (!hdrv->match(hdev, hid_ignore_special_drivers)) { 2594 ret = -ENODEV; 2595 goto unlock; 2596 } 2597 } else { 2598 /* 2599 * hid-generic implements .match(), so if 2600 * hid_ignore_special_drivers is set, we can safely 2601 * return. 
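			 *
			 * For reference, a driver-supplied ->match() callback
			 * has the shape below (purely illustrative; no such
			 * callback is defined in this file):
			 *
			 *	static bool example_match(struct hid_device *hdev,
			 *				  bool ignore_special_drivers)
			 *	{
			 *		return !ignore_special_drivers;
			 *	}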
			 */
			if (hid_ignore_special_drivers) {
				ret = -ENODEV;
				goto unlock;
			}
		}

		/* reset the quirks that have been previously set */
		hdev->quirks = hid_lookup_quirk(hdev);
		hdev->driver = hdrv;
		if (hdrv->probe) {
			ret = hdrv->probe(hdev, id);
		} else { /* default probe */
			ret = hid_open_report(hdev);
			if (!ret)
				ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
		}
		if (ret) {
			hid_close_report(hdev);
			hdev->driver = NULL;
		}
	}
unlock:
	if (!hdev->io_started)
		up(&hdev->driver_input_lock);
end:
	return ret;
}

static void hid_device_remove(struct device *dev)
{
	struct hid_device *hdev = to_hid_device(dev);
	struct hid_driver *hdrv;

	down(&hdev->driver_input_lock);
	hdev->io_started = false;

	hdrv = hdev->driver;
	if (hdrv) {
		if (hdrv->remove)
			hdrv->remove(hdev);
		else /* default remove */
			hid_hw_stop(hdev);
		hid_close_report(hdev);
		hdev->driver = NULL;
	}

	if (!hdev->io_started)
		up(&hdev->driver_input_lock);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);

	return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *hid_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
static struct bin_attribute *hid_dev_bin_attrs[] = {
	&dev_bin_attr_report_desc,
	NULL
};
static const struct attribute_group hid_dev_group = {
	.attrs = hid_dev_attrs,
	.bin_attrs = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);

static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct hid_device *hdev = to_hid_device(dev);

	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
			   hdev->bus, hdev->vendor, hdev->product))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
		return -ENOMEM;

	return 0;
}

struct bus_type hid_bus_type = {
	.name = "hid",
	.dev_groups = hid_dev_groups,
	.drv_groups = hid_drv_groups,
	.match = hid_bus_match,
	.probe = hid_device_probe,
	.remove = hid_device_remove,
	.uevent = hid_uevent,
};
EXPORT_SYMBOL(hid_bus_type);

int hid_add_device(struct hid_device *hdev)
{
	static atomic_t id = ATOMIC_INIT(0);
	int ret;

	if (WARN_ON(hdev->status & HID_STAT_ADDED))
		return -EBUSY;

	hdev->quirks = hid_lookup_quirk(hdev);

	/* we need to kill them here, otherwise they will stay allocated to
	 * wait for a coming driver */
	if (hid_ignore(hdev))
		return -ENODEV;
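
	/*
	 * The transport driver is expected to have set hdev->ll_driver before
	 * calling hid_add_device().  As a rough sketch only (the example_*
	 * names are hypothetical; the ops themselves come from
	 * struct hid_ll_driver):
	 *
	 *	static const struct hid_ll_driver example_ll_driver = {
	 *		.start       = example_start,
	 *		.stop        = example_stop,
	 *		.open        = example_open,
	 *		.close       = example_close,
	 *		.parse       = example_parse,
	 *		.raw_request = example_raw_request,
	 *	};
	 *
	 * .raw_request is checked right below; .parse is called unconditionally
	 * further down to read the report descriptor.
	 */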

	/*
	 * Check for the mandatory transport channel.
	 */
	if (!hdev->ll_driver->raw_request) {
		hid_err(hdev, "transport driver missing .raw_request()\n");
		return -EINVAL;
	}

	/*
	 * Read the device report descriptor once and use it as a template
	 * for the driver-specific modifications.
	 */
	ret = hdev->ll_driver->parse(hdev);
	if (ret)
		return ret;
	if (!hdev->dev_rdesc)
		return -ENODEV;

	/*
	 * Scan generic devices for group information
	 */
	if (hid_ignore_special_drivers) {
		hdev->group = HID_GROUP_GENERIC;
	} else if (!hdev->group &&
		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
		ret = hid_scan_report(hdev);
		if (ret)
			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
	}

	hdev->id = atomic_inc_return(&id);

	/* XXX hack, any other cleaner solution after the driver core
	 * is converted to allow more than 20 bytes as the device name? */
	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
		     hdev->vendor, hdev->product, hdev->id);

	hid_debug_register(hdev, dev_name(&hdev->dev));
	ret = device_add(&hdev->dev);
	if (!ret)
		hdev->status |= HID_STAT_ADDED;
	else
		hid_debug_unregister(hdev);

	return ret;
}
EXPORT_SYMBOL_GPL(hid_add_device);

/**
 * hid_allocate_device - allocate new hid device descriptor
 *
 * Allocate and initialize a hid device, so that hid_destroy_device can be
 * used to free it.
 *
 * Return: a new hid_device pointer on success, otherwise an ERR_PTR-encoded
 * error value.
 */
struct hid_device *hid_allocate_device(void)
{
	struct hid_device *hdev;
	int ret = -ENOMEM;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return ERR_PTR(ret);

	device_initialize(&hdev->dev);
	hdev->dev.release = hid_device_release;
	hdev->dev.bus = &hid_bus_type;
	device_enable_async_suspend(&hdev->dev);

	hid_close_report(hdev);

	init_waitqueue_head(&hdev->debug_wait);
	INIT_LIST_HEAD(&hdev->debug_list);
	spin_lock_init(&hdev->debug_list_lock);
	sema_init(&hdev->driver_input_lock, 1);
	mutex_init(&hdev->ll_open_lock);

	hid_bpf_device_init(hdev);

	return hdev;
}
EXPORT_SYMBOL_GPL(hid_allocate_device);

static void hid_remove_device(struct hid_device *hdev)
{
	if (hdev->status & HID_STAT_ADDED) {
		device_del(&hdev->dev);
		hid_debug_unregister(hdev);
		hdev->status &= ~HID_STAT_ADDED;
	}
	kfree(hdev->dev_rdesc);
	hdev->dev_rdesc = NULL;
	hdev->dev_rsize = 0;
}

/**
 * hid_destroy_device - free previously allocated device
 *
 * @hdev: hid device
 *
 * If you allocate a hid_device through hid_allocate_device(), you should only
 * ever free it with this function.
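 *
 * A typical transport-driver lifecycle, as a rough sketch only (error handling
 * trimmed; everything except the hid_* calls is illustrative):
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	hdev->ll_driver = &example_ll_driver;
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);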
2831 */ 2832 void hid_destroy_device(struct hid_device *hdev) 2833 { 2834 hid_bpf_destroy_device(hdev); 2835 hid_remove_device(hdev); 2836 put_device(&hdev->dev); 2837 } 2838 EXPORT_SYMBOL_GPL(hid_destroy_device); 2839 2840 2841 static int __hid_bus_reprobe_drivers(struct device *dev, void *data) 2842 { 2843 struct hid_driver *hdrv = data; 2844 struct hid_device *hdev = to_hid_device(dev); 2845 2846 if (hdev->driver == hdrv && 2847 !hdrv->match(hdev, hid_ignore_special_drivers) && 2848 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) 2849 return device_reprobe(dev); 2850 2851 return 0; 2852 } 2853 2854 static int __hid_bus_driver_added(struct device_driver *drv, void *data) 2855 { 2856 struct hid_driver *hdrv = to_hid_driver(drv); 2857 2858 if (hdrv->match) { 2859 bus_for_each_dev(&hid_bus_type, NULL, hdrv, 2860 __hid_bus_reprobe_drivers); 2861 } 2862 2863 return 0; 2864 } 2865 2866 static int __bus_removed_driver(struct device_driver *drv, void *data) 2867 { 2868 return bus_rescan_devices(&hid_bus_type); 2869 } 2870 2871 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner, 2872 const char *mod_name) 2873 { 2874 int ret; 2875 2876 hdrv->driver.name = hdrv->name; 2877 hdrv->driver.bus = &hid_bus_type; 2878 hdrv->driver.owner = owner; 2879 hdrv->driver.mod_name = mod_name; 2880 2881 INIT_LIST_HEAD(&hdrv->dyn_list); 2882 spin_lock_init(&hdrv->dyn_lock); 2883 2884 ret = driver_register(&hdrv->driver); 2885 2886 if (ret == 0) 2887 bus_for_each_drv(&hid_bus_type, NULL, NULL, 2888 __hid_bus_driver_added); 2889 2890 return ret; 2891 } 2892 EXPORT_SYMBOL_GPL(__hid_register_driver); 2893 2894 void hid_unregister_driver(struct hid_driver *hdrv) 2895 { 2896 driver_unregister(&hdrv->driver); 2897 hid_free_dynids(hdrv); 2898 2899 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver); 2900 } 2901 EXPORT_SYMBOL_GPL(hid_unregister_driver); 2902 2903 int hid_check_keys_pressed(struct hid_device *hid) 2904 { 2905 struct hid_input *hidinput; 2906 int i; 2907 2908 if (!(hid->claimed & HID_CLAIMED_INPUT)) 2909 return 0; 2910 2911 list_for_each_entry(hidinput, &hid->inputs, list) { 2912 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) 2913 if (hidinput->input->key[i]) 2914 return 1; 2915 } 2916 2917 return 0; 2918 } 2919 EXPORT_SYMBOL_GPL(hid_check_keys_pressed); 2920 2921 #ifdef CONFIG_HID_BPF 2922 static struct hid_bpf_ops hid_ops = { 2923 .hid_get_report = hid_get_report, 2924 .hid_hw_raw_request = hid_hw_raw_request, 2925 .owner = THIS_MODULE, 2926 .bus_type = &hid_bus_type, 2927 }; 2928 #endif 2929 2930 static int __init hid_init(void) 2931 { 2932 int ret; 2933 2934 if (hid_debug) 2935 pr_warn("hid_debug is now used solely for parser and driver debugging.\n" 2936 "debugfs is now used for inspecting the device (report descriptor, reports)\n"); 2937 2938 ret = bus_register(&hid_bus_type); 2939 if (ret) { 2940 pr_err("can't register hid bus\n"); 2941 goto err; 2942 } 2943 2944 #ifdef CONFIG_HID_BPF 2945 hid_bpf_ops = &hid_ops; 2946 #endif 2947 2948 ret = hidraw_init(); 2949 if (ret) 2950 goto err_bus; 2951 2952 hid_debug_init(); 2953 2954 return 0; 2955 err_bus: 2956 bus_unregister(&hid_bus_type); 2957 err: 2958 return ret; 2959 } 2960 2961 static void __exit hid_exit(void) 2962 { 2963 #ifdef CONFIG_HID_BPF 2964 hid_bpf_ops = NULL; 2965 #endif 2966 hid_debug_exit(); 2967 hidraw_exit(); 2968 bus_unregister(&hid_bus_type); 2969 hid_quirks_exit(HID_BUS_ANY); 2970 } 2971 2972 module_init(hid_init); 2973 module_exit(hid_exit); 2974 2975 MODULE_AUTHOR("Andreas Gal"); 2976 
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_LICENSE("GPL");
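
/*
 * For reference, a HID device driver sitting on this bus typically hooks in
 * with a sketch like the one below.  All example_* identifiers (including the
 * vendor/product constants) are hypothetical; hid_parse(), hid_hw_start(),
 * hid_hw_stop(), HID_USB_DEVICE() and module_hid_driver() come from
 * linux/hid.h:
 *
 *	static const struct hid_device_id example_devices[] = {
 *		{ HID_USB_DEVICE(USB_VENDOR_ID_EXAMPLE,
 *				 USB_DEVICE_ID_EXAMPLE) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, example_devices);
 *
 *	static int example_probe(struct hid_device *hdev,
 *				 const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 *
 *	static void example_remove(struct hid_device *hdev)
 *	{
 *		hid_hw_stop(hdev);
 *	}
 *
 *	static struct hid_driver example_driver = {
 *		.name = "example-hid",
 *		.id_table = example_devices,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *	module_hid_driver(example_driver);
 */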