// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HID support for Linux
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2012 Jiri Kosina
 */

/*
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/input.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>

#include <linux/hid.h>
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>

#include "hid-ids.h"

/*
 * Version Information
 */

#define DRIVER_DESC "HID core driver"

static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");

/*
 * Register a new report for a device.
 */

struct hid_report *hid_register_report(struct hid_device *device,
				       enum hid_report_type type, unsigned int id,
				       unsigned int application)
{
	struct hid_report_enum *report_enum = device->report_enum + type;
	struct hid_report *report;

	if (id >= HID_MAX_IDS)
		return NULL;
	if (report_enum->report_id_hash[id])
		return report_enum->report_id_hash[id];

	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
	if (!report)
		return NULL;

	if (id != 0)
		report_enum->numbered = 1;

	report->id = id;
	report->type = type;
	report->size = 0;
	report->device = device;
	report->application = application;
	report_enum->report_id_hash[id] = report;

	list_add_tail(&report->list, &report_enum->report_list);
	INIT_LIST_HEAD(&report->field_entry_list);

	return report;
}
EXPORT_SYMBOL_GPL(hid_register_report);

/*
 * Register a new field for this report.
 */

static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
	struct hid_field *field;

	if (report->maxfield == HID_MAX_FIELDS) {
		hid_err(report->device, "too many fields in report\n");
		return NULL;
	}

	field = kvzalloc((sizeof(struct hid_field) +
			  usages * sizeof(struct hid_usage) +
			  3 * usages * sizeof(unsigned int)), GFP_KERNEL);
	if (!field)
		return NULL;

	field->index = report->maxfield++;
	report->field[field->index] = field;
	field->usage = (struct hid_usage *)(field + 1);
	field->value = (s32 *)(field->usage + usages);
	field->new_value = (s32 *)(field->value + usages);
	field->usages_priorities = (s32 *)(field->new_value + usages);
	field->report = report;

	return field;
}

/*
 * Open a collection. The type/usage is pushed on the stack.
 */

static int open_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection;
	unsigned usage;
	int collection_index;

	usage = parser->local.usage[0];

	if (parser->collection_stack_ptr == parser->collection_stack_size) {
		unsigned int *collection_stack;
		unsigned int new_size = parser->collection_stack_size +
					HID_COLLECTION_STACK_SIZE;

		collection_stack = krealloc(parser->collection_stack,
					    new_size * sizeof(unsigned int),
					    GFP_KERNEL);
		if (!collection_stack)
			return -ENOMEM;

		parser->collection_stack = collection_stack;
		parser->collection_stack_size = new_size;
	}

	if (parser->device->maxcollection == parser->device->collection_size) {
		collection = kmalloc(
				array3_size(sizeof(struct hid_collection),
					    parser->device->collection_size,
					    2),
				GFP_KERNEL);
		if (collection == NULL) {
			hid_err(parser->device, "failed to reallocate collection array\n");
			return -ENOMEM;
		}
		memcpy(collection, parser->device->collection,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		memset(collection + parser->device->collection_size, 0,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		kfree(parser->device->collection);
		parser->device->collection = collection;
		parser->device->collection_size *= 2;
	}

	parser->collection_stack[parser->collection_stack_ptr++] =
		parser->device->maxcollection;

	collection_index = parser->device->maxcollection++;
	collection = parser->device->collection + collection_index;
	collection->type = type;
	collection->usage = usage;
	collection->level = parser->collection_stack_ptr - 1;
	collection->parent_idx = (collection->level == 0) ? -1 :
		parser->collection_stack[collection->level - 1];

	if (type == HID_COLLECTION_APPLICATION)
		parser->device->maxapplication++;

	return 0;
}

/*
 * Close a collection.
 */

static int close_collection(struct hid_parser *parser)
{
	if (!parser->collection_stack_ptr) {
		hid_err(parser->device, "collection stack underflow\n");
		return -EINVAL;
	}
	parser->collection_stack_ptr--;
	return 0;
}

/*
 * Climb up the stack, search for the specified collection type
 * and return the usage.
 */

static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection = parser->device->collection;
	int n;

	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
		unsigned index = parser->collection_stack[n];
		if (collection[index].type == type)
			return collection[index].usage;
	}
	return 0; /* we know nothing about this usage type */
}

/*
 * Concatenate usage which defines 16 bits or less with the
 * currently defined usage page to form a 32 bit usage
 */

static void complete_usage(struct hid_parser *parser, unsigned int index)
{
	parser->local.usage[index] &= 0xFFFF;
	parser->local.usage[index] |=
		(parser->global.usage_page & 0xFFFF) << 16;
}

/*
 * Add a usage to the temporary parser table.
 */

static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
	if (parser->local.usage_index >= HID_MAX_USAGES) {
		hid_err(parser->device, "usage index exceeded\n");
		return -1;
	}
	parser->local.usage[parser->local.usage_index] = usage;

	/*
	 * If Usage item only includes usage id, concatenate it with
	 * currently defined usage page
	 */
	if (size <= 2)
		complete_usage(parser, parser->local.usage_index);

	parser->local.usage_size[parser->local.usage_index] = size;
	parser->local.collection_index[parser->local.usage_index] =
		parser->collection_stack_ptr ?
		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
	parser->local.usage_index++;
	return 0;
}

/*
 * Register a new field for this report.
 */

static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
{
	struct hid_report *report;
	struct hid_field *field;
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
	unsigned int usages;
	unsigned int offset;
	unsigned int i;
	unsigned int application;

	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);

	report = hid_register_report(parser->device, report_type,
				     parser->global.report_id, application);
	if (!report) {
		hid_err(parser->device, "hid_register_report failed\n");
		return -1;
	}

	/* Handle both signed and unsigned cases properly */
	if ((parser->global.logical_minimum < 0 &&
	     parser->global.logical_maximum <
	     parser->global.logical_minimum) ||
	    (parser->global.logical_minimum >= 0 &&
	     (__u32)parser->global.logical_maximum <
	     (__u32)parser->global.logical_minimum)) {
		dbg_hid("logical range invalid 0x%x 0x%x\n",
			parser->global.logical_minimum,
			parser->global.logical_maximum);
		return -1;
	}

	offset = report->size;
	report->size += parser->global.report_size * parser->global.report_count;

	if (parser->device->ll_driver->max_buffer_size)
		max_buffer_size = parser->device->ll_driver->max_buffer_size;

	/* Total size check: Allow for possible report index byte */
	if (report->size > (max_buffer_size - 1) << 3) {
		hid_err(parser->device, "report is too long\n");
		return -1;
	}

	if (!parser->local.usage_index) /* Ignore padding fields */
		return 0;

	usages = max_t(unsigned, parser->local.usage_index,
		       parser->global.report_count);

	field = hid_register_field(report, usages);
	if (!field)
		return 0;

	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
	field->application = application;

	for (i = 0; i < usages; i++) {
		unsigned j = i;
		/* Duplicate the last usage we parsed if we have excess values */
		if (i >= parser->local.usage_index)
			j = parser->local.usage_index - 1;
		field->usage[i].hid = parser->local.usage[j];
		field->usage[i].collection_index =
			parser->local.collection_index[j];
		field->usage[i].usage_index = i;
		field->usage[i].resolution_multiplier = 1;
	}

	field->maxusage = usages;
	field->flags = flags;
	field->report_offset = offset;
	field->report_type = report_type;
	field->report_size = parser->global.report_size;
	field->report_count = parser->global.report_count;
	field->logical_minimum = parser->global.logical_minimum;
	field->logical_maximum = parser->global.logical_maximum;
	field->physical_minimum = parser->global.physical_minimum;
	field->physical_maximum = parser->global.physical_maximum;
	field->unit_exponent = parser->global.unit_exponent;
	field->unit = parser->global.unit;

	return 0;
}

/*
 * Read data value from item.
 */

static u32 item_udata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.u8;
	case 2: return item->data.u16;
	case 4: return item->data.u32;
	}
	return 0;
}

static s32 item_sdata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.s8;
	case 2: return item->data.s16;
	case 4: return item->data.s32;
	}
	return 0;
}

/*
 * Process a global item.
 */

static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
	__s32 raw_value;
	switch (item->tag) {
	case HID_GLOBAL_ITEM_TAG_PUSH:

		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
			hid_err(parser->device, "global environment stack overflow\n");
			return -1;
		}

		memcpy(parser->global_stack + parser->global_stack_ptr++,
		       &parser->global, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_POP:

		if (!parser->global_stack_ptr) {
			hid_err(parser->device, "global environment stack underflow\n");
			return -1;
		}

		memcpy(&parser->global, parser->global_stack +
		       --parser->global_stack_ptr, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
		parser->global.usage_page = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
		parser->global.logical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
		if (parser->global.logical_minimum < 0)
			parser->global.logical_maximum = item_sdata(item);
		else
			parser->global.logical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
		parser->global.physical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
		if (parser->global.physical_minimum < 0)
			parser->global.physical_maximum = item_sdata(item);
		else
			parser->global.physical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
		/* Many devices provide unit exponent as a two's complement
		 * nibble due to the common misunderstanding of HID
		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
		 * both this and the standard encoding. */
		raw_value = item_sdata(item);
		if (!(raw_value & 0xfffffff0))
			parser->global.unit_exponent = hid_snto32(raw_value, 4);
		else
			parser->global.unit_exponent = raw_value;
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT:
		parser->global.unit = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
		parser->global.report_size = item_udata(item);
		if (parser->global.report_size > 256) {
			hid_err(parser->device, "invalid report_size %d\n",
				parser->global.report_size);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
		parser->global.report_count = item_udata(item);
		if (parser->global.report_count > HID_MAX_USAGES) {
			hid_err(parser->device, "invalid report_count %d\n",
				parser->global.report_count);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
		parser->global.report_id = item_udata(item);
		if (parser->global.report_id == 0 ||
		    parser->global.report_id >= HID_MAX_IDS) {
			hid_err(parser->device, "report_id %u is invalid\n",
				parser->global.report_id);
			return -1;
		}
		return 0;

	default:
		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
		return -1;
	}
}

/*
 * Process a local item.
 */

static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	unsigned n;
	__u32 count;

	data = item_udata(item);

	switch (item->tag) {
	case HID_LOCAL_ITEM_TAG_DELIMITER:

		if (data) {
			/*
			 * We treat items before the first delimiter
			 * as global to all usage sets (branch 0).
			 * At the moment we process only these global
			 * items and the first delimiter set.
			 */
			if (parser->local.delimiter_depth != 0) {
				hid_err(parser->device, "nested delimiters\n");
				return -1;
			}
			parser->local.delimiter_depth++;
			parser->local.delimiter_branch++;
		} else {
			if (parser->local.delimiter_depth < 1) {
				hid_err(parser->device, "bogus close delimiter\n");
				return -1;
			}
			parser->local.delimiter_depth--;
		}
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		return hid_add_usage(parser, data, item->size);

	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		parser->local.usage_minimum = data;
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		count = data - parser->local.usage_minimum;
		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
			/*
			 * We do not warn if the name is not set, we are
			 * actually pre-scanning the device.
			 */
			if (dev_name(&parser->device->dev))
				hid_warn(parser->device,
					 "ignoring exceeding usage max\n");
			data = HID_MAX_USAGES - parser->local.usage_index +
				parser->local.usage_minimum - 1;
			if (data <= 0) {
				hid_err(parser->device,
					"no more usage index available\n");
				return -1;
			}
		}

		for (n = parser->local.usage_minimum; n <= data; n++)
			if (hid_add_usage(parser, n, item->size)) {
				dbg_hid("hid_add_usage failed\n");
				return -1;
			}
		return 0;

	default:

		dbg_hid("unknown local item tag 0x%x\n", item->tag);
		return 0;
	}
	return 0;
}

/*
 * Concatenate Usage Pages into Usages where relevant:
 * As per specification, 6.2.2.8: "When the parser encounters a main item it
 * concatenates the last declared Usage Page with a Usage to form a complete
 * usage value."
 */

static void hid_concatenate_last_usage_page(struct hid_parser *parser)
{
	int i;
	unsigned int usage_page;
	unsigned int current_page;

	if (!parser->local.usage_index)
		return;

	usage_page = parser->global.usage_page;

	/*
	 * Concatenate usage page again only if last declared Usage Page
	 * has not been already used in previous usages concatenation
	 */
	for (i = parser->local.usage_index - 1; i >= 0; i--) {
		if (parser->local.usage_size[i] > 2)
			/* Ignore extended usages */
			continue;

		current_page = parser->local.usage[i] >> 16;
		if (current_page == usage_page)
			break;

		complete_usage(parser, i);
	}
}

/*
 * Process a main item.
 */

static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int ret;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		ret = open_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		ret = close_collection(parser);
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
		break;
	default:
		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
		ret = 0;
	}

	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */

	return ret;
}

/*
 * Process a reserved item.
 */

static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
{
	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
	return 0;
}

/*
 * Free a report and all registered fields. The field->usage and
 * field->value tables are allocated behind the field, so we need
 * only to free(field) itself.
 */

static void hid_free_report(struct hid_report *report)
{
	unsigned n;

	kfree(report->field_entries);

	for (n = 0; n < report->maxfield; n++)
		kvfree(report->field[n]);
	kfree(report);
}

/*
 * Close report. This function returns the device
 * state to the point prior to hid_open_report().
 */
static void hid_close_report(struct hid_device *device)
{
	unsigned i, j;

	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = device->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];
			if (report)
				hid_free_report(report);
		}
		memset(report_enum, 0, sizeof(*report_enum));
		INIT_LIST_HEAD(&report_enum->report_list);
	}

	kfree(device->rdesc);
	device->rdesc = NULL;
	device->rsize = 0;

	kfree(device->collection);
	device->collection = NULL;
	device->collection_size = 0;
	device->maxcollection = 0;
	device->maxapplication = 0;

	device->status &= ~HID_STAT_PARSED;
}

/*
 * Free a device structure, all reports, and all fields.
 */

void hiddev_free(struct kref *ref)
{
	struct hid_device *hid = container_of(ref, struct hid_device, ref);

	hid_close_report(hid);
	kfree(hid->dev_rdesc);
	kfree(hid);
}

static void hid_device_release(struct device *dev)
{
	struct hid_device *hid = to_hid_device(dev);

	kref_put(&hid->ref, hiddev_free);
}

/*
 * Fetch a report description item from the data stream. We support long
 * items, though they are not used yet.
 */

static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
{
	u8 b;

	if ((end - start) <= 0)
		return NULL;

	b = *start++;

	item->type = (b >> 2) & 3;
	item->tag = (b >> 4) & 15;

	if (item->tag == HID_ITEM_TAG_LONG) {

		item->format = HID_ITEM_FORMAT_LONG;

		if ((end - start) < 2)
			return NULL;

		item->size = *start++;
		item->tag = *start++;

		if ((end - start) < item->size)
			return NULL;

		item->data.longdata = start;
		start += item->size;
		return start;
	}

	item->format = HID_ITEM_FORMAT_SHORT;
	item->size = b & 3;

	switch (item->size) {
	case 0:
		return start;

	case 1:
		if ((end - start) < 1)
			return NULL;
		item->data.u8 = *start++;
		return start;

	case 2:
		if ((end - start) < 2)
			return NULL;
		item->data.u16 = get_unaligned_le16(start);
		start = (__u8 *)((__le16 *)start + 1);
		return start;

	case 3:
		item->size++;
		if ((end - start) < 4)
			return NULL;
		item->data.u32 = get_unaligned_le32(start);
		start = (__u8 *)((__le32 *)start + 1);
		return start;
	}

	return NULL;
}

static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
{
	struct hid_device *hid = parser->device;

	if (usage == HID_DG_CONTACTID)
		hid->group = HID_GROUP_MULTITOUCH;
}

static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
{
	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;

	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}

static void hid_scan_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_device *hid = parser->device;
	int i;

	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
	    (type == HID_COLLECTION_PHYSICAL ||
	     type == HID_COLLECTION_APPLICATION))
		hid->group = HID_GROUP_SENSOR_HUB;

	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
	    hid->group == HID_GROUP_MULTITOUCH)
		hid->group = HID_GROUP_GENERIC;

	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] == HID_GD_POINTER)
				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;

	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;

	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] ==
			    (HID_UP_GOOGLEVENDOR | 0x0001))
				parser->device->group =
					HID_GROUP_VIVALDI;
}

static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int i;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		hid_scan_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		/* ignore constant inputs, they will be ignored by hid-input */
		if (data & HID_MAIN_ITEM_CONSTANT)
			break;
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_input_usage(parser, parser->local.usage[i]);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_feature_usage(parser, parser->local.usage[i]);
		break;
	}

	/* Reset the local parser environment */
	memset(&parser->local, 0, sizeof(parser->local));

	return 0;
}

/*
 * Scan a report descriptor before the device is added to the bus.
 * Sets device groups and other properties that determine what driver
 * to load.
 */
static int hid_scan_report(struct hid_device *hid)
{
	struct hid_parser *parser;
	struct hid_item item;
	__u8 *start = hid->dev_rdesc;
	__u8 *end = start + hid->dev_rsize;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_scan_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser)
		return -ENOMEM;

	parser->device = hid;
	hid->group = HID_GROUP_GENERIC;

	/*
	 * The parsing is simpler than the one in hid_open_report() as we should
	 * be robust against hid errors. Those errors will be raised by
	 * hid_open_report() anyway.
	 */
	while ((start = fetch_item(start, end, &item)) != NULL)
		dispatch_type[item.type](parser, &item);

	/*
	 * Handle special flags set during scanning.
	 */
	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
	    (hid->group == HID_GROUP_MULTITOUCH))
		hid->group = HID_GROUP_MULTITOUCH_WIN_8;

	/*
	 * Vendor specific handling
	 */
	switch (hid->vendor) {
	case USB_VENDOR_ID_WACOM:
		hid->group = HID_GROUP_WACOM;
		break;
	case USB_VENDOR_ID_SYNAPTICS:
		if (hid->group == HID_GROUP_GENERIC)
			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
				/*
				 * hid-rmi should take care of them,
				 * not hid-generic
				 */
				hid->group = HID_GROUP_RMI;
		break;
	}

	kfree(parser->collection_stack);
	vfree(parser);
	return 0;
}

/**
 * hid_parse_report - parse device report
 *
 * @hid: hid device
 * @start: report start
 * @size: report size
 *
 * Allocate the device report as read by the bus driver. This function should
 * only be called from parse() in ll drivers.
 */
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
{
	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
	if (!hid->dev_rdesc)
		return -ENOMEM;
	hid->dev_rsize = size;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_parse_report);
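
/*
 * Illustrative sketch (not part of this file): a low-level transport driver
 * would typically fetch the raw report descriptor from the hardware and hand
 * it to hid_parse_report() from its ->parse() callback. The my_transport,
 * my_get_transport(), rdesc and rdesc_size names below are hypothetical;
 * only hid_parse_report() itself is real:
 *
 *	static int my_ll_parse(struct hid_device *hid)
 *	{
 *		// hypothetical transport state, filled in earlier from the
 *		// hardware by the transport driver
 *		struct my_transport *dev = my_get_transport(hid);
 *
 *		return hid_parse_report(hid, dev->rdesc, dev->rdesc_size);
 *	}
 */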

static const char * const hid_report_names[] = {
	"HID_INPUT_REPORT",
	"HID_OUTPUT_REPORT",
	"HID_FEATURE_REPORT",
};
/**
 * hid_validate_values - validate existing device report's value indexes
 *
 * @hid: hid device
 * @type: which report type to examine
 * @id: which report ID to examine (0 for first)
 * @field_index: which report field to examine
 * @report_counts: expected number of values
 *
 * Validate the number of values in a given field of a given report, after
 * parsing.
 */
struct hid_report *hid_validate_values(struct hid_device *hid,
				       enum hid_report_type type, unsigned int id,
				       unsigned int field_index,
				       unsigned int report_counts)
{
	struct hid_report *report;

	if (type > HID_FEATURE_REPORT) {
		hid_err(hid, "invalid HID report type %u\n", type);
		return NULL;
	}

	if (id >= HID_MAX_IDS) {
		hid_err(hid, "invalid HID report id %u\n", id);
		return NULL;
	}

	/*
	 * Explicitly not using hid_get_report() here since it depends on
	 * ->numbered being checked, which may not always be the case when
	 * drivers go to access report values.
	 */
	if (id == 0) {
		/*
		 * Validating on id 0 means we should examine the first
		 * report in the list.
		 */
		report = list_first_entry_or_null(
				&hid->report_enum[type].report_list,
				struct hid_report, list);
	} else {
		report = hid->report_enum[type].report_id_hash[id];
	}
	if (!report) {
		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
		return NULL;
	}
	if (report->maxfield <= field_index) {
		hid_err(hid, "not enough fields in %s %u\n",
			hid_report_names[type], id);
		return NULL;
	}
	if (report->field[field_index]->report_count < report_counts) {
		hid_err(hid, "not enough values in %s %u field %u\n",
			hid_report_names[type], id, field_index);
		return NULL;
	}
	return report;
}
EXPORT_SYMBOL_GPL(hid_validate_values);

static int hid_calculate_multiplier(struct hid_device *hid,
				    struct hid_field *multiplier)
{
	int m;
	__s32 v = *multiplier->value;
	__s32 lmin = multiplier->logical_minimum;
	__s32 lmax = multiplier->logical_maximum;
	__s32 pmin = multiplier->physical_minimum;
	__s32 pmax = multiplier->physical_maximum;

	/*
	 * "Because OS implementations will generally divide the control's
	 * reported count by the Effective Resolution Multiplier, designers
	 * should take care not to establish a potential Effective
	 * Resolution Multiplier of zero."
	 * HID Usage Table, v1.12, Section 4.3.1, p31
	 */
	if (lmax - lmin == 0)
		return 1;
	/*
	 * Handling the unit exponent is left as an exercise to whoever
	 * finds a device where that exponent is not 0.
	 */
	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
	if (unlikely(multiplier->unit_exponent != 0)) {
		hid_warn(hid,
			 "unsupported Resolution Multiplier unit exponent %d\n",
			 multiplier->unit_exponent);
	}

	/* There are no devices with an effective multiplier > 255 */
	if (unlikely(m == 0 || m > 255 || m < -255)) {
		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
		m = 1;
	}

	return m;
}

static void hid_apply_multiplier_to_field(struct hid_device *hid,
					  struct hid_field *field,
					  struct hid_collection *multiplier_collection,
					  int effective_multiplier)
{
	struct hid_collection *collection;
	struct hid_usage *usage;
	int i;

	/*
	 * If multiplier_collection is NULL, the multiplier applies
	 * to all fields in the report.
	 * Otherwise, it is the Logical Collection the multiplier applies to
	 * but our field may be in a subcollection of that collection.
	 */
	for (i = 0; i < field->maxusage; i++) {
		usage = &field->usage[i];

		collection = &hid->collection[usage->collection_index];
		while (collection->parent_idx != -1 &&
		       collection != multiplier_collection)
			collection = &hid->collection[collection->parent_idx];

		if (collection->parent_idx != -1 ||
		    multiplier_collection == NULL)
			usage->resolution_multiplier = effective_multiplier;

	}
}

static void hid_apply_multiplier(struct hid_device *hid,
				 struct hid_field *multiplier)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_field *field;
	struct hid_collection *multiplier_collection;
	int effective_multiplier;
	int i;

	/*
	 * "The Resolution Multiplier control must be contained in the same
	 * Logical Collection as the control(s) to which it is to be applied.
	 * If no Resolution Multiplier is defined, then the Resolution
	 * Multiplier defaults to 1. If more than one control exists in a
	 * Logical Collection, the Resolution Multiplier is associated with
	 * all controls in the collection. If no Logical Collection is
	 * defined, the Resolution Multiplier is associated with all
	 * controls in the report."
	 * HID Usage Table, v1.12, Section 4.3.1, p30
	 *
	 * Thus, search from the current collection upwards until we find a
	 * logical collection. Then search all fields for that same parent
	 * collection. Those are the fields the multiplier applies to.
	 *
	 * If we have more than one multiplier, it will overwrite the
	 * applicable fields later.
	 */
	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
	while (multiplier_collection->parent_idx != -1 &&
	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];

	effective_multiplier = hid_calculate_multiplier(hid, multiplier);

	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			field = rep->field[i];
			hid_apply_multiplier_to_field(hid, field,
						      multiplier_collection,
						      effective_multiplier);
		}
	}
}

/*
 * hid_setup_resolution_multiplier - set up all resolution multipliers
 *
 * @device: hid device
 *
 * Search for all Resolution Multiplier Feature Reports and apply their
 * value to all matching Input items. This only updates the internal struct
 * fields.
 *
 * The Resolution Multiplier is applied by the hardware. If the multiplier
 * is anything other than 1, the hardware will send pre-multiplied events
 * so that the same physical interaction generates an accumulated
 *	accumulated_value = value * multiplier
 * This may be achieved by sending
 * - "value * multiplier" for each event, or
 * - "value" but "multiplier" times as frequently, or
 * - a combination of the above
 * The only guarantee is that the same physical interaction always generates
 * an accumulated 'value * multiplier'.
 *
 * This function must be called before any event processing and after
 * any SetRequest to the Resolution Multiplier.
 */
void hid_setup_resolution_multiplier(struct hid_device *hid)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_usage *usage;
	int i, j;

	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			/* Ignore if report count is out of bounds. */
			if (rep->field[i]->report_count < 1)
				continue;

			for (j = 0; j < rep->field[i]->maxusage; j++) {
				usage = &rep->field[i]->usage[j];
				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
					hid_apply_multiplier(hid,
							     rep->field[i]);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);

/**
 * hid_open_report - open a driver-specific device report
 *
 * @device: hid device
 *
 * Parse a report description into a hid_device structure. Reports are
 * enumerated, fields are attached to these reports.
 * 0 returned on success, otherwise nonzero error value.
 *
 * This function (or the equivalent hid_parse() macro) should only be
 * called from probe() in drivers, before starting the device.
 */
int hid_open_report(struct hid_device *device)
{
	struct hid_parser *parser;
	struct hid_item item;
	unsigned int size;
	__u8 *start;
	__u8 *buf;
	__u8 *end;
	__u8 *next;
	int ret;
	int i;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_parser_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	if (WARN_ON(device->status & HID_STAT_PARSED))
		return -EBUSY;

	start = device->dev_rdesc;
	if (WARN_ON(!start))
		return -ENODEV;
	size = device->dev_rsize;

	/* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */
	buf = call_hid_bpf_rdesc_fixup(device, start, &size);
	if (buf == NULL)
		return -ENOMEM;

	if (device->driver->report_fixup)
		start = device->driver->report_fixup(device, buf, &size);
	else
		start = buf;

	start = kmemdup(start, size, GFP_KERNEL);
	kfree(buf);
	if (start == NULL)
		return -ENOMEM;

	device->rdesc = start;
	device->rsize = size;

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	parser->device = device;

	end = start + size;

	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
				     sizeof(struct hid_collection), GFP_KERNEL);
	if (!device->collection) {
		ret = -ENOMEM;
		goto err;
	}
	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
		device->collection[i].parent_idx = -1;

	ret = -EINVAL;
	while ((next = fetch_item(start, end, &item)) != NULL) {
		start = next;

		if (item.format != HID_ITEM_FORMAT_SHORT) {
			hid_err(device, "unexpected long global item\n");
			goto err;
		}

		if (dispatch_type[item.type](parser, &item)) {
			hid_err(device, "item %u %u %u %u parsing failed\n",
				item.format, (unsigned)item.size,
				(unsigned)item.type, (unsigned)item.tag);
			goto err;
		}

		if (start == end) {
			if (parser->collection_stack_ptr) {
				hid_err(device, "unbalanced collection at end of report description\n");
				goto err;
			}
			if (parser->local.delimiter_depth) {
				hid_err(device, "unbalanced delimiter at end of report description\n");
				goto err;
			}

			/*
			 * fetch initial values in case the device's
			 * default multiplier isn't the recommended 1
			 */
			hid_setup_resolution_multiplier(device);

			kfree(parser->collection_stack);
			vfree(parser);
			device->status |= HID_STAT_PARSED;

			return 0;
		}
	}

	hid_err(device, "item fetching failed at offset %u/%u\n",
		size - (unsigned int)(end - start), size);
err:
	kfree(parser->collection_stack);
alloc_err:
	vfree(parser);
	hid_close_report(device);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_open_report);
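
/*
 * Illustrative sketch (not part of this file): the usual calling sequence in
 * a device driver's probe() is hid_parse() (which wraps hid_open_report())
 * followed by hid_hw_start(). Only the two HID calls are real; the
 * surrounding probe() is schematic:
 *
 *	static int my_driver_probe(struct hid_device *hdev,
 *				   const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 */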

/*
 * Convert a signed n-bit integer to signed 32-bit integer. Common
 * cases are done through the compiler, the odd sizes have to be
 * done by hand.
 */

static s32 snto32(__u32 value, unsigned n)
{
	if (!value || !n)
		return 0;

	if (n > 32)
		n = 32;

	switch (n) {
	case 8:  return ((__s8)value);
	case 16: return ((__s16)value);
	case 32: return ((__s32)value);
	}
	return value & (1 << (n - 1)) ? value | (~0U << n) : value;
}

s32 hid_snto32(__u32 value, unsigned n)
{
	return snto32(value, n);
}
EXPORT_SYMBOL_GPL(hid_snto32);
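
/*
 * Illustrative examples of the sign extension performed by hid_snto32()
 * (values chosen for the example, not taken from a real device):
 *
 *	hid_snto32(0xff, 8)   == -1
 *	hid_snto32(0x80, 8)   == -128
 *	hid_snto32(0x0f, 8)   == 15
 *	hid_snto32(0xfff, 12) == -1	(odd width, handled by hand)
 */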

/*
 * Convert a signed 32-bit integer to a signed n-bit integer.
 */

static u32 s32ton(__s32 value, unsigned n)
{
	s32 a = value >> (n - 1);
	if (a && a != -1)
		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
	return value & ((1 << n) - 1);
}

/*
 * Extract/implement a data field from/to a little endian report (bit array).
 *
 * Code sort-of follows HID spec:
 * http://www.usb.org/developers/hidpage/HID1_11.pdf
 *
 * While the USB HID spec allows unlimited length bit fields in "report
 * descriptors", most devices never use more than 16 bits.
 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
 */

static u32 __extract(u8 *report, unsigned offset, int n)
{
	unsigned int idx = offset / 8;
	unsigned int bit_nr = 0;
	unsigned int bit_shift = offset % 8;
	int bits_to_copy = 8 - bit_shift;
	u32 value = 0;
	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;

	while (n > 0) {
		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
		n -= bits_to_copy;
		bit_nr += bits_to_copy;
		bits_to_copy = 8;
		bit_shift = 0;
		idx++;
	}

	return value & mask;
}

u32 hid_field_extract(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n)
{
	if (n > 32) {
		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
			      __func__, n, current->comm);
		n = 32;
	}

	return __extract(report, offset, n);
}
EXPORT_SYMBOL_GPL(hid_field_extract);

/*
 * "implement" : set bits in a little endian bit stream.
 * Same concepts as "extract" (see comments above).
 * The data mangled in the bit stream remains in little endian
 * order the whole time. It makes more sense to talk about
 * endianness of register values by considering a register
 * a "cached" copy of the little endian bit stream.
 */

static void __implement(u8 *report, unsigned offset, int n, u32 value)
{
	unsigned int idx = offset / 8;
	unsigned int bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;

	while (n - bits_to_set >= 0) {
		report[idx] &= ~(0xff << bit_shift);
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_shift = 0;
		idx++;
	}

	/* last few bits (less than one byte) */
	if (n) {
		u8 bit_mask = ((1U << n) - 1);
		report[idx] &= ~(bit_mask << bit_shift);
		report[idx] |= value << bit_shift;
	}
}

static void implement(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n, u32 value)
{
	if (unlikely(n > 32)) {
		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
			 __func__, n, current->comm);
		n = 32;
	} else if (n < 32) {
		u32 m = (1U << n) - 1;

		if (unlikely(value > m)) {
			hid_warn(hid,
				 "%s() called with too large value %d (n: %d)! (%s)\n",
				 __func__, value, n, current->comm);
			value &= m;
		}
	}

	__implement(report, offset, n, value);
}

/*
 * Search an array for a value.
 */

static int search(__s32 *array, __s32 value, unsigned n)
{
	while (n--) {
		if (*array++ == value)
			return 0;
	}
	return -1;
}

/**
 * hid_match_report - check if driver's raw_event should be called
 *
 * @hid: hid device
 * @report: hid report to match against
 *
 * compare hid->driver->report_table->report_type to report->type
 */
static int hid_match_report(struct hid_device *hid, struct hid_report *report)
{
	const struct hid_report_id *id = hid->driver->report_table;

	if (!id) /* NULL means all */
		return 1;

	for (; id->report_type != HID_TERMINATOR; id++)
		if (id->report_type == HID_ANY_ID ||
		    id->report_type == report->type)
			return 1;
	return 0;
}

/**
 * hid_match_usage - check if driver's event should be called
 *
 * @hid: hid device
 * @usage: usage to match against
 *
 * compare hid->driver->usage_table->usage_{type,code} to
 * usage->usage_{type,code}
 */
static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
{
	const struct hid_usage_id *id = hid->driver->usage_table;

	if (!id) /* NULL means all */
		return 1;

	for (; id->usage_type != HID_ANY_ID - 1; id++)
		if ((id->usage_hid == HID_ANY_ID ||
		     id->usage_hid == usage->hid) &&
		    (id->usage_type == HID_ANY_ID ||
		     id->usage_type == usage->type) &&
		    (id->usage_code == HID_ANY_ID ||
		     id->usage_code == usage->code))
			return 1;
	return 0;
}

static void hid_process_event(struct hid_device *hid, struct hid_field *field,
			      struct hid_usage *usage, __s32 value, int interrupt)
{
	struct hid_driver *hdrv = hid->driver;
	int ret;

	if (!list_empty(&hid->debug_list))
		hid_dump_input(hid, usage, value);

	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
		ret = hdrv->event(hid, field, usage, value);
		if (ret != 0) {
			if (ret < 0)
				hid_err(hid, "%s's event failed with %d\n",
					hdrv->name, ret);
			return;
		}
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_hid_event(hid, field, usage, value);
	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
		hid->hiddev_hid_event(hid, field, usage, value);
}

/*
 * Checks if the given value is valid within this field
 */
static inline int hid_array_value_is_valid(struct hid_field *field,
					   __s32 value)
{
	__s32 min = field->logical_minimum;

	/*
	 * Value needs to be between logical min and max, and
	 * (value - min) is used as an index in the usage array.
	 * This array is of size field->maxusage
	 */
	return value >= min &&
	       value <= field->logical_maximum &&
	       value - min < field->maxusage;
}

/*
 * Fetch the field from the data. The field content is stored for next
 * report processing (we do differential reporting to the layer).
 */
static void hid_input_fetch_field(struct hid_device *hid,
				  struct hid_field *field,
				  __u8 *data)
{
	unsigned n;
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	__s32 min = field->logical_minimum;
	__s32 *value;

	value = field->new_value;
	memset(value, 0, count * sizeof(__s32));
	field->ignored = false;

	for (n = 0; n < count; n++) {

		value[n] = min < 0 ?
			snto32(hid_field_extract(hid, data, offset + n * size,
						 size), size) :
			hid_field_extract(hid, data, offset + n * size, size);

		/* Ignore report if ErrorRollOver */
		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
		    hid_array_value_is_valid(field, value[n]) &&
		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
			field->ignored = true;
			return;
		}
	}
}

/*
 * Process a received variable field.
 */

static void hid_input_var_field(struct hid_device *hid,
				struct hid_field *field,
				int interrupt)
{
	unsigned int count = field->report_count;
	__s32 *value = field->new_value;
	unsigned int n;

	for (n = 0; n < count; n++)
		hid_process_event(hid,
				  field,
				  &field->usage[n],
				  value[n],
				  interrupt);

	memcpy(field->value, value, count * sizeof(__s32));
}

/*
 * Process a received array field. The field content is stored for
 * next report processing (we do differential reporting to the layer).
 */

static void hid_input_array_field(struct hid_device *hid,
				  struct hid_field *field,
				  int interrupt)
{
	unsigned int n;
	unsigned int count = field->report_count;
	__s32 min = field->logical_minimum;
	__s32 *value;

	value = field->new_value;

	/* ErrorRollOver */
	if (field->ignored)
		return;

	for (n = 0; n < count; n++) {
		if (hid_array_value_is_valid(field, field->value[n]) &&
		    search(value, field->value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[field->value[n] - min],
					  0,
					  interrupt);

		if (hid_array_value_is_valid(field, value[n]) &&
		    search(field->value, value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[value[n] - min],
					  1,
					  interrupt);
	}

	memcpy(field->value, value, count * sizeof(__s32));
}

/*
 * Analyse a received report, and fetch the data from it. The field
 * content is stored for next report processing (we do differential
 * reporting to the layer).
 */
static void hid_process_report(struct hid_device *hid,
			       struct hid_report *report,
			       __u8 *data,
			       int interrupt)
{
	unsigned int a;
	struct hid_field_entry *entry;
	struct hid_field *field;

	/* first retrieve all incoming values in data */
	for (a = 0; a < report->maxfield; a++)
		hid_input_fetch_field(hid, report->field[a], data);

	if (!list_empty(&report->field_entry_list)) {
		/* INPUT_REPORT, we have a priority list of fields */
		list_for_each_entry(entry,
				    &report->field_entry_list,
				    list) {
			field = entry->field;

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_process_event(hid,
						  field,
						  &field->usage[entry->index],
						  field->new_value[entry->index],
						  interrupt);
			else
				hid_input_array_field(hid, field, interrupt);
		}

		/* we need to do the memcpy at the end for var items */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				memcpy(field->value, field->new_value,
				       field->report_count * sizeof(__s32));
		}
	} else {
		/* FEATURE_REPORT, regular processing */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_input_var_field(hid, field, interrupt);
			else
				hid_input_array_field(hid, field, interrupt);
		}
	}
}

/*
 * Insert a given usage_index in a field in the list
 * of processed usages in the report.
 *
 * The elements of lower priority score are processed
 * first.
 */
static void __hid_insert_field_entry(struct hid_device *hid,
				     struct hid_report *report,
				     struct hid_field_entry *entry,
				     struct hid_field *field,
				     unsigned int usage_index)
{
	struct hid_field_entry *next;

	entry->field = field;
	entry->index = usage_index;
	entry->priority = field->usages_priorities[usage_index];

	/* insert the element at the correct position */
	list_for_each_entry(next,
			    &report->field_entry_list,
			    list) {
		/*
		 * the priority of our element is strictly higher
		 * than the next one, insert it before
		 */
		if (entry->priority > next->priority) {
			list_add_tail(&entry->list, &next->list);
			return;
		}
	}

	/* lowest priority score: insert at the end */
	list_add_tail(&entry->list, &report->field_entry_list);
}

static void hid_report_process_ordering(struct hid_device *hid,
					struct hid_report *report)
{
	struct hid_field *field;
	struct hid_field_entry *entries;
	unsigned int a, u, usages;
	unsigned int count = 0;

	/* count the number of individual fields in the report */
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE)
			count += field->report_count;
		else
			count++;
	}

	/* allocate the memory to process the fields */
	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return;

	report->field_entries = entries;

	/*
	 * walk through all fields in the report and
	 * store them by priority order in report->field_entry_list
	 *
	 * - Var elements are individualized (field + usage_index)
	 * - Arrays are taken as one, we cannot choose an order for them
	 */
	usages = 0;
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
			for (u = 0; u < field->report_count; u++) {
				__hid_insert_field_entry(hid, report,
							 &entries[usages],
							 field, u);
				usages++;
			}
		} else {
			__hid_insert_field_entry(hid, report, &entries[usages],
						 field, 0);
			usages++;
		}
	}
}

static void hid_process_ordering(struct hid_device *hid)
{
	struct hid_report *report;
	struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];

	list_for_each_entry(report, &report_enum->report_list, list)
		hid_report_process_ordering(hid, report);
}

/*
 * Output the field into the report.
 */

static void hid_output_field(const struct hid_device *hid,
			     struct hid_field *field, __u8 *data)
{
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	unsigned n;

	for (n = 0; n < count; n++) {
		if (field->logical_minimum < 0) /* signed values */
			implement(hid, data, offset + n * size, size,
				  s32ton(field->value[n], size));
		else /* unsigned values */
			implement(hid, data, offset + n * size, size,
				  field->value[n]);
	}
}

/*
 * Compute the size of a report.
 */
static size_t hid_compute_report_size(struct hid_report *report)
{
	if (report->size)
		return ((report->size - 1) >> 3) + 1;

	return 0;
}

/*
 * Create a report. 'data' has to be allocated using
 * hid_alloc_report_buf() so that it has proper size.
 */

void hid_output_report(struct hid_report *report, __u8 *data)
{
	unsigned n;

	if (report->id > 0)
		*data++ = report->id;

	memset(data, 0, hid_compute_report_size(report));
	for (n = 0; n < report->maxfield; n++)
		hid_output_field(report->device, report->field[n], data);
}
EXPORT_SYMBOL_GPL(hid_output_report);

/*
 * Allocator for buffer that is going to be passed to hid_output_report()
 */
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
{
	/*
	 * 7 extra bytes are necessary to achieve proper functionality
	 * of implement() working on 8 byte chunks
	 */

	u32 len = hid_report_len(report) + 7;

	return kmalloc(len, flags);
}
EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
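
/*
 * Illustrative sketch (not part of this file): a driver that has filled in
 * an output report with hid_set_field() would typically serialize and send
 * it roughly like this (error handling trimmed; the hdev/report variables
 * are assumed to exist in the caller):
 *
 *	u8 *buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	hid_output_report(report, buf);
 *	hid_hw_output_report(hdev, buf, hid_report_len(report));
 *	kfree(buf);
 */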

/*
 * Set a field value. The report this field belongs to has to be
 * created and transferred to the device, to set this value in the
 * device.
 */

int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
	unsigned size;

	if (!field)
		return -1;

	size = field->report_size;

	hid_dump_input(field->report->device, field->usage + offset, value);

	if (offset >= field->report_count) {
		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
			offset, field->report_count);
		return -1;
	}
	if (field->logical_minimum < 0) {
		if (value != snto32(s32ton(value, size), size)) {
			hid_err(field->report->device, "value %d is out of range\n", value);
			return -1;
		}
	}
	field->value[offset] = value;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_set_field);

static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
					 const u8 *data)
{
	struct hid_report *report;
	unsigned int n = 0;	/* Normally report number is 0 */

	/* Device uses numbered reports, data[0] is report number */
	if (report_enum->numbered)
		n = *data;

	report = report_enum->report_id_hash[n];
	if (report == NULL)
		dbg_hid("undefined report_id %u received\n", n);

	return report;
}

/*
 * Implement a generic .request() callback, using .raw_request()
 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
 */
int __hid_request(struct hid_device *hid, struct hid_report *report,
		  enum hid_class_request reqtype)
{
	char *buf;
	int ret;
	u32 len;

	buf = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len = hid_report_len(report);

	if (reqtype == HID_REQ_SET_REPORT)
		hid_output_report(report, buf);

	ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
					  report->type, reqtype);
	if (ret < 0) {
		dbg_hid("unable to complete request: %d\n", ret);
		goto out;
	}

	if (reqtype == HID_REQ_GET_REPORT)
		hid_input_report(hid, report->type, buf, ret, 0);

	ret = 0;

out:
	kfree(buf);
	return ret;
}
EXPORT_SYMBOL_GPL(__hid_request);
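
/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * path through hid_hw_request(). A typical way for a driver to refresh one
 * of its feature reports (the report lookup by id is schematic):
 *
 *	struct hid_report *report =
 *		hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[id];
 *
 *	if (report) {
 *		hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
 *		hid_hw_wait(hdev);	// wait for the GET_REPORT to complete
 *	}
 */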
2015 hid_process_report(hid, report, cdata, interrupt); 2016 hdrv = hid->driver; 2017 if (hdrv && hdrv->report) 2018 hdrv->report(hid, report); 2019 } 2020 2021 if (hid->claimed & HID_CLAIMED_INPUT) 2022 hidinput_report_event(hid, report); 2023 out: 2024 return ret; 2025 } 2026 EXPORT_SYMBOL_GPL(hid_report_raw_event); 2027 2028 2029 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type, 2030 u8 *data, u32 size, int interrupt, u64 source, bool from_bpf, 2031 bool lock_already_taken) 2032 { 2033 struct hid_report_enum *report_enum; 2034 struct hid_driver *hdrv; 2035 struct hid_report *report; 2036 int ret = 0; 2037 2038 if (!hid) 2039 return -ENODEV; 2040 2041 ret = down_trylock(&hid->driver_input_lock); 2042 if (lock_already_taken && !ret) { 2043 up(&hid->driver_input_lock); 2044 return -EINVAL; 2045 } else if (!lock_already_taken && ret) { 2046 return -EBUSY; 2047 } 2048 2049 if (!hid->driver) { 2050 ret = -ENODEV; 2051 goto unlock; 2052 } 2053 report_enum = hid->report_enum + type; 2054 hdrv = hid->driver; 2055 2056 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf); 2057 if (IS_ERR(data)) { 2058 ret = PTR_ERR(data); 2059 goto unlock; 2060 } 2061 2062 if (!size) { 2063 dbg_hid("empty report\n"); 2064 ret = -1; 2065 goto unlock; 2066 } 2067 2068 /* Avoid unnecessary overhead if debugfs is disabled */ 2069 if (!list_empty(&hid->debug_list)) 2070 hid_dump_report(hid, type, data, size); 2071 2072 report = hid_get_report(report_enum, data); 2073 2074 if (!report) { 2075 ret = -1; 2076 goto unlock; 2077 } 2078 2079 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { 2080 ret = hdrv->raw_event(hid, report, data, size); 2081 if (ret < 0) 2082 goto unlock; 2083 } 2084 2085 ret = hid_report_raw_event(hid, type, data, size, interrupt); 2086 2087 unlock: 2088 if (!lock_already_taken) 2089 up(&hid->driver_input_lock); 2090 return ret; 2091 } 2092 2093 /** 2094 * hid_input_report - report data from lower layer (usb, bt...) 2095 * 2096 * @hid: hid device 2097 * @type: HID report type (HID_*_REPORT) 2098 * @data: report contents 2099 * @size: size of data parameter 2100 * @interrupt: distinguish between interrupt and control transfers 2101 * 2102 * This is data entry for lower layers. 
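 *
 * A minimal usage sketch (the completion handler below is hypothetical, not
 * an in-tree API): a transport driver typically calls this from its
 * interrupt-in completion path with one raw report per call:
 *
 *	static void my_ll_irq_complete(struct hid_device *hid, u8 *data,
 *				       u32 size)
 *	{
 *		hid_input_report(hid, HID_INPUT_REPORT, data, size, 1);
 *	}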
2103 */ 2104 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2105 int interrupt) 2106 { 2107 return __hid_input_report(hid, type, data, size, interrupt, 0, 2108 false, /* from_bpf */ 2109 false /* lock_already_taken */); 2110 } 2111 EXPORT_SYMBOL_GPL(hid_input_report); 2112 2113 bool hid_match_one_id(const struct hid_device *hdev, 2114 const struct hid_device_id *id) 2115 { 2116 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && 2117 (id->group == HID_GROUP_ANY || id->group == hdev->group) && 2118 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && 2119 (id->product == HID_ANY_ID || id->product == hdev->product); 2120 } 2121 2122 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, 2123 const struct hid_device_id *id) 2124 { 2125 for (; id->bus; id++) 2126 if (hid_match_one_id(hdev, id)) 2127 return id; 2128 2129 return NULL; 2130 } 2131 EXPORT_SYMBOL_GPL(hid_match_id); 2132 2133 static const struct hid_device_id hid_hiddev_list[] = { 2134 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) }, 2135 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) }, 2136 { } 2137 }; 2138 2139 static bool hid_hiddev(struct hid_device *hdev) 2140 { 2141 return !!hid_match_id(hdev, hid_hiddev_list); 2142 } 2143 2144 2145 static ssize_t 2146 read_report_descriptor(struct file *filp, struct kobject *kobj, 2147 struct bin_attribute *attr, 2148 char *buf, loff_t off, size_t count) 2149 { 2150 struct device *dev = kobj_to_dev(kobj); 2151 struct hid_device *hdev = to_hid_device(dev); 2152 2153 if (off >= hdev->rsize) 2154 return 0; 2155 2156 if (off + count > hdev->rsize) 2157 count = hdev->rsize - off; 2158 2159 memcpy(buf, hdev->rdesc + off, count); 2160 2161 return count; 2162 } 2163 2164 static ssize_t 2165 show_country(struct device *dev, struct device_attribute *attr, 2166 char *buf) 2167 { 2168 struct hid_device *hdev = to_hid_device(dev); 2169 2170 return sprintf(buf, "%02x\n", hdev->country & 0xff); 2171 } 2172 2173 static struct bin_attribute dev_bin_attr_report_desc = { 2174 .attr = { .name = "report_descriptor", .mode = 0444 }, 2175 .read = read_report_descriptor, 2176 .size = HID_MAX_DESCRIPTOR_SIZE, 2177 }; 2178 2179 static const struct device_attribute dev_attr_country = { 2180 .attr = { .name = "country", .mode = 0444 }, 2181 .show = show_country, 2182 }; 2183 2184 int hid_connect(struct hid_device *hdev, unsigned int connect_mask) 2185 { 2186 static const char *types[] = { "Device", "Pointer", "Mouse", "Device", 2187 "Joystick", "Gamepad", "Keyboard", "Keypad", 2188 "Multi-Axis Controller" 2189 }; 2190 const char *type, *bus; 2191 char buf[64] = ""; 2192 unsigned int i; 2193 int len; 2194 int ret; 2195 2196 ret = hid_bpf_connect_device(hdev); 2197 if (ret) 2198 return ret; 2199 2200 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) 2201 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); 2202 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) 2203 connect_mask |= HID_CONNECT_HIDINPUT_FORCE; 2204 if (hdev->bus != BUS_USB) 2205 connect_mask &= ~HID_CONNECT_HIDDEV; 2206 if (hid_hiddev(hdev)) 2207 connect_mask |= HID_CONNECT_HIDDEV_FORCE; 2208 2209 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev, 2210 connect_mask & HID_CONNECT_HIDINPUT_FORCE)) 2211 hdev->claimed |= HID_CLAIMED_INPUT; 2212 2213 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect && 2214 !hdev->hiddev_connect(hdev, 2215 connect_mask & HID_CONNECT_HIDDEV_FORCE)) 2216 hdev->claimed |= HID_CLAIMED_HIDDEV; 2217 if 
((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) 2218 hdev->claimed |= HID_CLAIMED_HIDRAW; 2219 2220 if (connect_mask & HID_CONNECT_DRIVER) 2221 hdev->claimed |= HID_CLAIMED_DRIVER; 2222 2223 /* Drivers with the ->raw_event callback set are not required to connect 2224 * to any other listener. */ 2225 if (!hdev->claimed && !hdev->driver->raw_event) { 2226 hid_err(hdev, "device has no listeners, quitting\n"); 2227 return -ENODEV; 2228 } 2229 2230 hid_process_ordering(hdev); 2231 2232 if ((hdev->claimed & HID_CLAIMED_INPUT) && 2233 (connect_mask & HID_CONNECT_FF) && hdev->ff_init) 2234 hdev->ff_init(hdev); 2235 2236 len = 0; 2237 if (hdev->claimed & HID_CLAIMED_INPUT) 2238 len += sprintf(buf + len, "input"); 2239 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2240 len += sprintf(buf + len, "%shiddev%d", len ? "," : "", 2241 ((struct hiddev *)hdev->hiddev)->minor); 2242 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2243 len += sprintf(buf + len, "%shidraw%d", len ? "," : "", 2244 ((struct hidraw *)hdev->hidraw)->minor); 2245 2246 type = "Device"; 2247 for (i = 0; i < hdev->maxcollection; i++) { 2248 struct hid_collection *col = &hdev->collection[i]; 2249 if (col->type == HID_COLLECTION_APPLICATION && 2250 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK && 2251 (col->usage & 0xffff) < ARRAY_SIZE(types)) { 2252 type = types[col->usage & 0xffff]; 2253 break; 2254 } 2255 } 2256 2257 switch (hdev->bus) { 2258 case BUS_USB: 2259 bus = "USB"; 2260 break; 2261 case BUS_BLUETOOTH: 2262 bus = "BLUETOOTH"; 2263 break; 2264 case BUS_I2C: 2265 bus = "I2C"; 2266 break; 2267 case BUS_VIRTUAL: 2268 bus = "VIRTUAL"; 2269 break; 2270 case BUS_INTEL_ISHTP: 2271 case BUS_AMD_SFH: 2272 bus = "SENSOR HUB"; 2273 break; 2274 default: 2275 bus = "<UNKNOWN>"; 2276 } 2277 2278 ret = device_create_file(&hdev->dev, &dev_attr_country); 2279 if (ret) 2280 hid_warn(hdev, 2281 "can't create sysfs country code attribute err: %d\n", ret); 2282 2283 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", 2284 buf, bus, hdev->version >> 8, hdev->version & 0xff, 2285 type, hdev->name, hdev->phys); 2286 2287 return 0; 2288 } 2289 EXPORT_SYMBOL_GPL(hid_connect); 2290 2291 void hid_disconnect(struct hid_device *hdev) 2292 { 2293 device_remove_file(&hdev->dev, &dev_attr_country); 2294 if (hdev->claimed & HID_CLAIMED_INPUT) 2295 hidinput_disconnect(hdev); 2296 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2297 hdev->hiddev_disconnect(hdev); 2298 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2299 hidraw_disconnect(hdev); 2300 hdev->claimed = 0; 2301 2302 hid_bpf_disconnect_device(hdev); 2303 } 2304 EXPORT_SYMBOL_GPL(hid_disconnect); 2305 2306 /** 2307 * hid_hw_start - start underlying HW 2308 * @hdev: hid device 2309 * @connect_mask: which outputs to connect, see HID_CONNECT_* 2310 * 2311 * Call this in the probe function *after* hid_parse(). This will set up HW 2312 * buffers and start the device (if not deferred to device open). 2313 * hid_hw_stop() must be called if this was successful.
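 *
 * A minimal probe sketch (the driver name below is hypothetical, not taken
 * from any in-tree driver):
 *
 *	static int my_hid_probe(struct hid_device *hdev,
 *				const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}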
2314 */ 2315 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask) 2316 { 2317 int error; 2318 2319 error = hdev->ll_driver->start(hdev); 2320 if (error) 2321 return error; 2322 2323 if (connect_mask) { 2324 error = hid_connect(hdev, connect_mask); 2325 if (error) { 2326 hdev->ll_driver->stop(hdev); 2327 return error; 2328 } 2329 } 2330 2331 return 0; 2332 } 2333 EXPORT_SYMBOL_GPL(hid_hw_start); 2334 2335 /** 2336 * hid_hw_stop - stop underlying HW 2337 * @hdev: hid device 2338 * 2339 * This is usually called from remove function or from probe when something 2340 * failed and hid_hw_start was called already. 2341 */ 2342 void hid_hw_stop(struct hid_device *hdev) 2343 { 2344 hid_disconnect(hdev); 2345 hdev->ll_driver->stop(hdev); 2346 } 2347 EXPORT_SYMBOL_GPL(hid_hw_stop); 2348 2349 /** 2350 * hid_hw_open - signal underlying HW to start delivering events 2351 * @hdev: hid device 2352 * 2353 * Tell underlying HW to start delivering events from the device. 2354 * This function should be called sometime after successful call 2355 * to hid_hw_start(). 2356 */ 2357 int hid_hw_open(struct hid_device *hdev) 2358 { 2359 int ret; 2360 2361 ret = mutex_lock_killable(&hdev->ll_open_lock); 2362 if (ret) 2363 return ret; 2364 2365 if (!hdev->ll_open_count++) { 2366 ret = hdev->ll_driver->open(hdev); 2367 if (ret) 2368 hdev->ll_open_count--; 2369 } 2370 2371 mutex_unlock(&hdev->ll_open_lock); 2372 return ret; 2373 } 2374 EXPORT_SYMBOL_GPL(hid_hw_open); 2375 2376 /** 2377 * hid_hw_close - signal underlaying HW to stop delivering events 2378 * 2379 * @hdev: hid device 2380 * 2381 * This function indicates that we are not interested in the events 2382 * from this device anymore. Delivery of events may or may not stop, 2383 * depending on the number of users still outstanding. 
2384 */ 2385 void hid_hw_close(struct hid_device *hdev) 2386 { 2387 mutex_lock(&hdev->ll_open_lock); 2388 if (!--hdev->ll_open_count) 2389 hdev->ll_driver->close(hdev); 2390 mutex_unlock(&hdev->ll_open_lock); 2391 } 2392 EXPORT_SYMBOL_GPL(hid_hw_close); 2393 2394 /** 2395 * hid_hw_request - send report request to device 2396 * 2397 * @hdev: hid device 2398 * @report: report to send 2399 * @reqtype: hid request type 2400 */ 2401 void hid_hw_request(struct hid_device *hdev, 2402 struct hid_report *report, enum hid_class_request reqtype) 2403 { 2404 if (hdev->ll_driver->request) 2405 return hdev->ll_driver->request(hdev, report, reqtype); 2406 2407 __hid_request(hdev, report, reqtype); 2408 } 2409 EXPORT_SYMBOL_GPL(hid_hw_request); 2410 2411 int __hid_hw_raw_request(struct hid_device *hdev, 2412 unsigned char reportnum, __u8 *buf, 2413 size_t len, enum hid_report_type rtype, 2414 enum hid_class_request reqtype, 2415 u64 source, bool from_bpf) 2416 { 2417 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; 2418 int ret; 2419 2420 if (hdev->ll_driver->max_buffer_size) 2421 max_buffer_size = hdev->ll_driver->max_buffer_size; 2422 2423 if (len < 1 || len > max_buffer_size || !buf) 2424 return -EINVAL; 2425 2426 ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype, 2427 reqtype, source, from_bpf); 2428 if (ret) 2429 return ret; 2430 2431 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len, 2432 rtype, reqtype); 2433 } 2434 2435 /** 2436 * hid_hw_raw_request - send report request to device 2437 * 2438 * @hdev: hid device 2439 * @reportnum: report ID 2440 * @buf: in/out data to transfer 2441 * @len: length of buf 2442 * @rtype: HID report type 2443 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT 2444 * 2445 * Return: count of data transferred, negative if error 2446 * 2447 * Same behavior as hid_hw_request, but with raw buffers instead. 
2448 */ 2449 int hid_hw_raw_request(struct hid_device *hdev, 2450 unsigned char reportnum, __u8 *buf, 2451 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype) 2452 { 2453 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false); 2454 } 2455 EXPORT_SYMBOL_GPL(hid_hw_raw_request); 2456 2457 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source, 2458 bool from_bpf) 2459 { 2460 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; 2461 int ret; 2462 2463 if (hdev->ll_driver->max_buffer_size) 2464 max_buffer_size = hdev->ll_driver->max_buffer_size; 2465 2466 if (len < 1 || len > max_buffer_size || !buf) 2467 return -EINVAL; 2468 2469 ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf); 2470 if (ret) 2471 return ret; 2472 2473 if (hdev->ll_driver->output_report) 2474 return hdev->ll_driver->output_report(hdev, buf, len); 2475 2476 return -ENOSYS; 2477 } 2478 2479 /** 2480 * hid_hw_output_report - send output report to device 2481 * 2482 * @hdev: hid device 2483 * @buf: raw data to transfer 2484 * @len: length of buf 2485 * 2486 * Return: count of data transferred, negative if error 2487 */ 2488 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) 2489 { 2490 return __hid_hw_output_report(hdev, buf, len, 0, false); 2491 } 2492 EXPORT_SYMBOL_GPL(hid_hw_output_report); 2493 2494 #ifdef CONFIG_PM 2495 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) 2496 { 2497 if (hdev->driver && hdev->driver->suspend) 2498 return hdev->driver->suspend(hdev, state); 2499 2500 return 0; 2501 } 2502 EXPORT_SYMBOL_GPL(hid_driver_suspend); 2503 2504 int hid_driver_reset_resume(struct hid_device *hdev) 2505 { 2506 if (hdev->driver && hdev->driver->reset_resume) 2507 return hdev->driver->reset_resume(hdev); 2508 2509 return 0; 2510 } 2511 EXPORT_SYMBOL_GPL(hid_driver_reset_resume); 2512 2513 int hid_driver_resume(struct hid_device *hdev) 2514 { 2515 if (hdev->driver && hdev->driver->resume) 2516 return hdev->driver->resume(hdev); 2517 2518 return 0; 2519 } 2520 EXPORT_SYMBOL_GPL(hid_driver_resume); 2521 #endif /* CONFIG_PM */ 2522 2523 struct hid_dynid { 2524 struct list_head list; 2525 struct hid_device_id id; 2526 }; 2527 2528 /** 2529 * new_id_store - add a new HID device ID to this driver and re-probe devices 2530 * @drv: target device driver 2531 * @buf: buffer for scanning device ID data 2532 * @count: input size 2533 * 2534 * Adds a new dynamic hid device ID to this driver, 2535 * and causes the driver to probe for all devices again. 2536 */ 2537 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 2538 size_t count) 2539 { 2540 struct hid_driver *hdrv = to_hid_driver(drv); 2541 struct hid_dynid *dynid; 2542 __u32 bus, vendor, product; 2543 unsigned long driver_data = 0; 2544 int ret; 2545 2546 ret = sscanf(buf, "%x %x %x %lx", 2547 &bus, &vendor, &product, &driver_data); 2548 if (ret < 3) 2549 return -EINVAL; 2550 2551 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 2552 if (!dynid) 2553 return -ENOMEM; 2554 2555 dynid->id.bus = bus; 2556 dynid->id.group = HID_GROUP_ANY; 2557 dynid->id.vendor = vendor; 2558 dynid->id.product = product; 2559 dynid->id.driver_data = driver_data; 2560 2561 spin_lock(&hdrv->dyn_lock); 2562 list_add_tail(&dynid->list, &hdrv->dyn_list); 2563 spin_unlock(&hdrv->dyn_lock); 2564 2565 ret = driver_attach(&hdrv->driver); 2566 2567 return ret ? 
: count; 2568 } 2569 static DRIVER_ATTR_WO(new_id); 2570 2571 static struct attribute *hid_drv_attrs[] = { 2572 &driver_attr_new_id.attr, 2573 NULL, 2574 }; 2575 ATTRIBUTE_GROUPS(hid_drv); 2576 2577 static void hid_free_dynids(struct hid_driver *hdrv) 2578 { 2579 struct hid_dynid *dynid, *n; 2580 2581 spin_lock(&hdrv->dyn_lock); 2582 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { 2583 list_del(&dynid->list); 2584 kfree(dynid); 2585 } 2586 spin_unlock(&hdrv->dyn_lock); 2587 } 2588 2589 const struct hid_device_id *hid_match_device(struct hid_device *hdev, 2590 struct hid_driver *hdrv) 2591 { 2592 struct hid_dynid *dynid; 2593 2594 spin_lock(&hdrv->dyn_lock); 2595 list_for_each_entry(dynid, &hdrv->dyn_list, list) { 2596 if (hid_match_one_id(hdev, &dynid->id)) { 2597 spin_unlock(&hdrv->dyn_lock); 2598 return &dynid->id; 2599 } 2600 } 2601 spin_unlock(&hdrv->dyn_lock); 2602 2603 return hid_match_id(hdev, hdrv->id_table); 2604 } 2605 EXPORT_SYMBOL_GPL(hid_match_device); 2606 2607 static int hid_bus_match(struct device *dev, const struct device_driver *drv) 2608 { 2609 struct hid_driver *hdrv = to_hid_driver(drv); 2610 struct hid_device *hdev = to_hid_device(dev); 2611 2612 return hid_match_device(hdev, hdrv) != NULL; 2613 } 2614 2615 /** 2616 * hid_compare_device_paths - check if both devices share the same path 2617 * @hdev_a: hid device 2618 * @hdev_b: hid device 2619 * @separator: char to use as separator 2620 * 2621 * Check if two devices share the same path up to the last occurrence of 2622 * the separator char. Both paths must exist (i.e., zero-length paths 2623 * don't match). 2624 */ 2625 bool hid_compare_device_paths(struct hid_device *hdev_a, 2626 struct hid_device *hdev_b, char separator) 2627 { 2628 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys; 2629 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys; 2630 2631 if (n1 != n2 || n1 <= 0 || n2 <= 0) 2632 return false; 2633 2634 return !strncmp(hdev_a->phys, hdev_b->phys, n1); 2635 } 2636 EXPORT_SYMBOL_GPL(hid_compare_device_paths); 2637 2638 static bool hid_check_device_match(struct hid_device *hdev, 2639 struct hid_driver *hdrv, 2640 const struct hid_device_id **id) 2641 { 2642 *id = hid_match_device(hdev, hdrv); 2643 if (!*id) 2644 return false; 2645 2646 if (hdrv->match) 2647 return hdrv->match(hdev, hid_ignore_special_drivers); 2648 2649 /* 2650 * hid-generic implements .match(), so we must be dealing with a 2651 * different HID driver here, and can simply check if 2652 * hid_ignore_special_drivers is set or not. 2653 */ 2654 return !hid_ignore_special_drivers; 2655 } 2656 2657 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv) 2658 { 2659 const struct hid_device_id *id; 2660 int ret; 2661 2662 if (!hid_check_device_match(hdev, hdrv, &id)) 2663 return -ENODEV; 2664 2665 hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL); 2666 if (!hdev->devres_group_id) 2667 return -ENOMEM; 2668 2669 /* reset the quirks that have been previously set */ 2670 hdev->quirks = hid_lookup_quirk(hdev); 2671 hdev->driver = hdrv; 2672 2673 if (hdrv->probe) { 2674 ret = hdrv->probe(hdev, id); 2675 } else { /* default probe */ 2676 ret = hid_open_report(hdev); 2677 if (!ret) 2678 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 2679 } 2680 2681 /* 2682 * Note that we are not closing the devres group opened above so 2683 * even resources that were attached to the device after probe is 2684 * run are released when hid_device_remove() is executed.
This is 2685 * needed as some drivers would allocate additional resources, 2686 * for example when updating firmware. 2687 */ 2688 2689 if (ret) { 2690 devres_release_group(&hdev->dev, hdev->devres_group_id); 2691 hid_close_report(hdev); 2692 hdev->driver = NULL; 2693 } 2694 2695 return ret; 2696 } 2697 2698 static int hid_device_probe(struct device *dev) 2699 { 2700 struct hid_device *hdev = to_hid_device(dev); 2701 struct hid_driver *hdrv = to_hid_driver(dev->driver); 2702 int ret = 0; 2703 2704 if (down_interruptible(&hdev->driver_input_lock)) 2705 return -EINTR; 2706 2707 hdev->io_started = false; 2708 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); 2709 2710 if (!hdev->driver) 2711 ret = __hid_device_probe(hdev, hdrv); 2712 2713 if (!hdev->io_started) 2714 up(&hdev->driver_input_lock); 2715 2716 return ret; 2717 } 2718 2719 static void hid_device_remove(struct device *dev) 2720 { 2721 struct hid_device *hdev = to_hid_device(dev); 2722 struct hid_driver *hdrv; 2723 2724 down(&hdev->driver_input_lock); 2725 hdev->io_started = false; 2726 2727 hdrv = hdev->driver; 2728 if (hdrv) { 2729 if (hdrv->remove) 2730 hdrv->remove(hdev); 2731 else /* default remove */ 2732 hid_hw_stop(hdev); 2733 2734 /* Release all devres resources allocated by the driver */ 2735 devres_release_group(&hdev->dev, hdev->devres_group_id); 2736 2737 hid_close_report(hdev); 2738 hdev->driver = NULL; 2739 } 2740 2741 if (!hdev->io_started) 2742 up(&hdev->driver_input_lock); 2743 } 2744 2745 static ssize_t modalias_show(struct device *dev, struct device_attribute *a, 2746 char *buf) 2747 { 2748 struct hid_device *hdev = container_of(dev, struct hid_device, dev); 2749 2750 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n", 2751 hdev->bus, hdev->group, hdev->vendor, hdev->product); 2752 } 2753 static DEVICE_ATTR_RO(modalias); 2754 2755 static struct attribute *hid_dev_attrs[] = { 2756 &dev_attr_modalias.attr, 2757 NULL, 2758 }; 2759 static struct bin_attribute *hid_dev_bin_attrs[] = { 2760 &dev_bin_attr_report_desc, 2761 NULL 2762 }; 2763 static const struct attribute_group hid_dev_group = { 2764 .attrs = hid_dev_attrs, 2765 .bin_attrs = hid_dev_bin_attrs, 2766 }; 2767 __ATTRIBUTE_GROUPS(hid_dev); 2768 2769 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env) 2770 { 2771 const struct hid_device *hdev = to_hid_device(dev); 2772 2773 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X", 2774 hdev->bus, hdev->vendor, hdev->product)) 2775 return -ENOMEM; 2776 2777 if (add_uevent_var(env, "HID_NAME=%s", hdev->name)) 2778 return -ENOMEM; 2779 2780 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys)) 2781 return -ENOMEM; 2782 2783 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq)) 2784 return -ENOMEM; 2785 2786 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X", 2787 hdev->bus, hdev->group, hdev->vendor, hdev->product)) 2788 return -ENOMEM; 2789 2790 return 0; 2791 } 2792 2793 const struct bus_type hid_bus_type = { 2794 .name = "hid", 2795 .dev_groups = hid_dev_groups, 2796 .drv_groups = hid_drv_groups, 2797 .match = hid_bus_match, 2798 .probe = hid_device_probe, 2799 .remove = hid_device_remove, 2800 .uevent = hid_uevent, 2801 }; 2802 EXPORT_SYMBOL(hid_bus_type); 2803 2804 int hid_add_device(struct hid_device *hdev) 2805 { 2806 static atomic_t id = ATOMIC_INIT(0); 2807 int ret; 2808 2809 if (WARN_ON(hdev->status & HID_STAT_ADDED)) 2810 return -EBUSY; 2811 2812 hdev->quirks = hid_lookup_quirk(hdev); 2813 2814 /* we need to kill them here, otherwise they will stay allocated to 2815 * wait 
for a coming driver */ 2816 if (hid_ignore(hdev)) 2817 return -ENODEV; 2818 2819 /* 2820 * Check for the mandatory transport channel. 2821 */ 2822 if (!hdev->ll_driver->raw_request) { 2823 hid_err(hdev, "transport driver missing .raw_request()\n"); 2824 return -EINVAL; 2825 } 2826 2827 /* 2828 * Read the device report descriptor once and use it as a template 2829 * for the driver-specific modifications. 2830 */ 2831 ret = hdev->ll_driver->parse(hdev); 2832 if (ret) 2833 return ret; 2834 if (!hdev->dev_rdesc) 2835 return -ENODEV; 2836 2837 /* 2838 * Scan generic devices for group information 2839 */ 2840 if (hid_ignore_special_drivers) { 2841 hdev->group = HID_GROUP_GENERIC; 2842 } else if (!hdev->group && 2843 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) { 2844 ret = hid_scan_report(hdev); 2845 if (ret) 2846 hid_warn(hdev, "bad device descriptor (%d)\n", ret); 2847 } 2848 2849 hdev->id = atomic_inc_return(&id); 2850 2851 /* XXX hack, any other cleaner solution after the driver core 2852 * is converted to allow more than 20 bytes as the device name? */ 2853 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, 2854 hdev->vendor, hdev->product, hdev->id); 2855 2856 hid_debug_register(hdev, dev_name(&hdev->dev)); 2857 ret = device_add(&hdev->dev); 2858 if (!ret) 2859 hdev->status |= HID_STAT_ADDED; 2860 else 2861 hid_debug_unregister(hdev); 2862 2863 return ret; 2864 } 2865 EXPORT_SYMBOL_GPL(hid_add_device); 2866 2867 /** 2868 * hid_allocate_device - allocate new hid device descriptor 2869 * 2870 * Allocate and initialize a hid device, so that hid_destroy_device() might be 2871 * used to free it. 2872 * 2873 * A new hid_device pointer is returned on success, otherwise an ERR_PTR-encoded 2874 * error value. 2875 */ 2876 struct hid_device *hid_allocate_device(void) 2877 { 2878 struct hid_device *hdev; 2879 int ret = -ENOMEM; 2880 2881 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); 2882 if (hdev == NULL) 2883 return ERR_PTR(ret); 2884 2885 device_initialize(&hdev->dev); 2886 hdev->dev.release = hid_device_release; 2887 hdev->dev.bus = &hid_bus_type; 2888 device_enable_async_suspend(&hdev->dev); 2889 2890 hid_close_report(hdev); 2891 2892 init_waitqueue_head(&hdev->debug_wait); 2893 INIT_LIST_HEAD(&hdev->debug_list); 2894 spin_lock_init(&hdev->debug_list_lock); 2895 sema_init(&hdev->driver_input_lock, 1); 2896 mutex_init(&hdev->ll_open_lock); 2897 kref_init(&hdev->ref); 2898 2899 ret = hid_bpf_device_init(hdev); 2900 if (ret) 2901 goto out_err; 2902 2903 return hdev; 2904 2905 out_err: 2906 hid_destroy_device(hdev); 2907 return ERR_PTR(ret); 2908 } 2909 EXPORT_SYMBOL_GPL(hid_allocate_device); 2910 2911 static void hid_remove_device(struct hid_device *hdev) 2912 { 2913 if (hdev->status & HID_STAT_ADDED) { 2914 device_del(&hdev->dev); 2915 hid_debug_unregister(hdev); 2916 hdev->status &= ~HID_STAT_ADDED; 2917 } 2918 kfree(hdev->dev_rdesc); 2919 hdev->dev_rdesc = NULL; 2920 hdev->dev_rsize = 0; 2921 } 2922 2923 /** 2924 * hid_destroy_device - free previously allocated device 2925 * 2926 * @hdev: hid device 2927 * 2928 * If you allocated the hid_device through hid_allocate_device(), you should 2929 * only ever free it with this function.
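 *
 * Typical lifecycle on the transport side (sketch only; my_ll_driver is a
 * hypothetical struct hid_ll_driver instance, error paths trimmed):
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	hdev->ll_driver = &my_ll_driver;
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);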
2930 */ 2931 void hid_destroy_device(struct hid_device *hdev) 2932 { 2933 hid_bpf_destroy_device(hdev); 2934 hid_remove_device(hdev); 2935 put_device(&hdev->dev); 2936 } 2937 EXPORT_SYMBOL_GPL(hid_destroy_device); 2938 2939 2940 static int __hid_bus_reprobe_drivers(struct device *dev, void *data) 2941 { 2942 struct hid_driver *hdrv = data; 2943 struct hid_device *hdev = to_hid_device(dev); 2944 2945 if (hdev->driver == hdrv && 2946 !hdrv->match(hdev, hid_ignore_special_drivers) && 2947 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) 2948 return device_reprobe(dev); 2949 2950 return 0; 2951 } 2952 2953 static int __hid_bus_driver_added(struct device_driver *drv, void *data) 2954 { 2955 struct hid_driver *hdrv = to_hid_driver(drv); 2956 2957 if (hdrv->match) { 2958 bus_for_each_dev(&hid_bus_type, NULL, hdrv, 2959 __hid_bus_reprobe_drivers); 2960 } 2961 2962 return 0; 2963 } 2964 2965 static int __bus_removed_driver(struct device_driver *drv, void *data) 2966 { 2967 return bus_rescan_devices(&hid_bus_type); 2968 } 2969 2970 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner, 2971 const char *mod_name) 2972 { 2973 int ret; 2974 2975 hdrv->driver.name = hdrv->name; 2976 hdrv->driver.bus = &hid_bus_type; 2977 hdrv->driver.owner = owner; 2978 hdrv->driver.mod_name = mod_name; 2979 2980 INIT_LIST_HEAD(&hdrv->dyn_list); 2981 spin_lock_init(&hdrv->dyn_lock); 2982 2983 ret = driver_register(&hdrv->driver); 2984 2985 if (ret == 0) 2986 bus_for_each_drv(&hid_bus_type, NULL, NULL, 2987 __hid_bus_driver_added); 2988 2989 return ret; 2990 } 2991 EXPORT_SYMBOL_GPL(__hid_register_driver); 2992 2993 void hid_unregister_driver(struct hid_driver *hdrv) 2994 { 2995 driver_unregister(&hdrv->driver); 2996 hid_free_dynids(hdrv); 2997 2998 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver); 2999 } 3000 EXPORT_SYMBOL_GPL(hid_unregister_driver); 3001 3002 int hid_check_keys_pressed(struct hid_device *hid) 3003 { 3004 struct hid_input *hidinput; 3005 int i; 3006 3007 if (!(hid->claimed & HID_CLAIMED_INPUT)) 3008 return 0; 3009 3010 list_for_each_entry(hidinput, &hid->inputs, list) { 3011 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) 3012 if (hidinput->input->key[i]) 3013 return 1; 3014 } 3015 3016 return 0; 3017 } 3018 EXPORT_SYMBOL_GPL(hid_check_keys_pressed); 3019 3020 #ifdef CONFIG_HID_BPF 3021 static struct hid_ops __hid_ops = { 3022 .hid_get_report = hid_get_report, 3023 .hid_hw_raw_request = __hid_hw_raw_request, 3024 .hid_hw_output_report = __hid_hw_output_report, 3025 .hid_input_report = __hid_input_report, 3026 .owner = THIS_MODULE, 3027 .bus_type = &hid_bus_type, 3028 }; 3029 #endif 3030 3031 static int __init hid_init(void) 3032 { 3033 int ret; 3034 3035 ret = bus_register(&hid_bus_type); 3036 if (ret) { 3037 pr_err("can't register hid bus\n"); 3038 goto err; 3039 } 3040 3041 #ifdef CONFIG_HID_BPF 3042 hid_ops = &__hid_ops; 3043 #endif 3044 3045 ret = hidraw_init(); 3046 if (ret) 3047 goto err_bus; 3048 3049 hid_debug_init(); 3050 3051 return 0; 3052 err_bus: 3053 bus_unregister(&hid_bus_type); 3054 err: 3055 return ret; 3056 } 3057 3058 static void __exit hid_exit(void) 3059 { 3060 #ifdef CONFIG_HID_BPF 3061 hid_ops = NULL; 3062 #endif 3063 hid_debug_exit(); 3064 hidraw_exit(); 3065 bus_unregister(&hid_bus_type); 3066 hid_quirks_exit(HID_BUS_ANY); 3067 } 3068 3069 module_init(hid_init); 3070 module_exit(hid_exit); 3071 3072 MODULE_AUTHOR("Andreas Gal"); 3073 MODULE_AUTHOR("Vojtech Pavlik"); 3074 MODULE_AUTHOR("Jiri Kosina"); 3075 MODULE_DESCRIPTION("HID support 
for Linux"); 3076 MODULE_LICENSE("GPL"); 3077
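/*
 * Usage note (illustrative sketch, not part of the core itself): a
 * device-specific HID driver built on top of this core typically declares
 * an id table and registers itself with module_hid_driver(); the names and
 * IDs below are hypothetical.
 *
 *	static const struct hid_device_id my_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, my_devices);
 *
 *	static struct hid_driver my_hid_driver = {
 *		.name		= "my-hid",
 *		.id_table	= my_devices,
 *	};
 *	module_hid_driver(my_hid_driver);
 */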