1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * HID support for Linux 4 * 5 * Copyright (c) 1999 Andreas Gal 6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 8 * Copyright (c) 2006-2012 Jiri Kosina 9 */ 10 11 /* 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/kernel.h> 20 #include <linux/list.h> 21 #include <linux/mm.h> 22 #include <linux/spinlock.h> 23 #include <asm/unaligned.h> 24 #include <asm/byteorder.h> 25 #include <linux/input.h> 26 #include <linux/wait.h> 27 #include <linux/vmalloc.h> 28 #include <linux/sched.h> 29 #include <linux/semaphore.h> 30 31 #include <linux/hid.h> 32 #include <linux/hiddev.h> 33 #include <linux/hid-debug.h> 34 #include <linux/hidraw.h> 35 36 #include "hid-ids.h" 37 38 /* 39 * Version Information 40 */ 41 42 #define DRIVER_DESC "HID core driver" 43 44 static int hid_ignore_special_drivers = 0; 45 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600); 46 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver"); 47 48 /* 49 * Register a new report for a device. 50 */ 51 52 struct hid_report *hid_register_report(struct hid_device *device, 53 enum hid_report_type type, unsigned int id, 54 unsigned int application) 55 { 56 struct hid_report_enum *report_enum = device->report_enum + type; 57 struct hid_report *report; 58 59 if (id >= HID_MAX_IDS) 60 return NULL; 61 if (report_enum->report_id_hash[id]) 62 return report_enum->report_id_hash[id]; 63 64 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL); 65 if (!report) 66 return NULL; 67 68 if (id != 0) 69 report_enum->numbered = 1; 70 71 report->id = id; 72 report->type = type; 73 report->size = 0; 74 report->device = device; 75 report->application = application; 76 report_enum->report_id_hash[id] = report; 77 78 list_add_tail(&report->list, &report_enum->report_list); 79 INIT_LIST_HEAD(&report->field_entry_list); 80 81 return report; 82 } 83 EXPORT_SYMBOL_GPL(hid_register_report); 84 85 /* 86 * Register a new field for this report. 87 */ 88 89 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) 90 { 91 struct hid_field *field; 92 93 if (report->maxfield == HID_MAX_FIELDS) { 94 hid_err(report->device, "too many fields in report\n"); 95 return NULL; 96 } 97 98 field = kzalloc((sizeof(struct hid_field) + 99 usages * sizeof(struct hid_usage) + 100 3 * usages * sizeof(unsigned int)), GFP_KERNEL); 101 if (!field) 102 return NULL; 103 104 field->index = report->maxfield++; 105 report->field[field->index] = field; 106 field->usage = (struct hid_usage *)(field + 1); 107 field->value = (s32 *)(field->usage + usages); 108 field->new_value = (s32 *)(field->value + usages); 109 field->usages_priorities = (s32 *)(field->new_value + usages); 110 field->report = report; 111 112 return field; 113 } 114 115 /* 116 * Open a collection. The type/usage is pushed on the stack. 
117 */ 118 119 static int open_collection(struct hid_parser *parser, unsigned type) 120 { 121 struct hid_collection *collection; 122 unsigned usage; 123 int collection_index; 124 125 usage = parser->local.usage[0]; 126 127 if (parser->collection_stack_ptr == parser->collection_stack_size) { 128 unsigned int *collection_stack; 129 unsigned int new_size = parser->collection_stack_size + 130 HID_COLLECTION_STACK_SIZE; 131 132 collection_stack = krealloc(parser->collection_stack, 133 new_size * sizeof(unsigned int), 134 GFP_KERNEL); 135 if (!collection_stack) 136 return -ENOMEM; 137 138 parser->collection_stack = collection_stack; 139 parser->collection_stack_size = new_size; 140 } 141 142 if (parser->device->maxcollection == parser->device->collection_size) { 143 collection = kmalloc( 144 array3_size(sizeof(struct hid_collection), 145 parser->device->collection_size, 146 2), 147 GFP_KERNEL); 148 if (collection == NULL) { 149 hid_err(parser->device, "failed to reallocate collection array\n"); 150 return -ENOMEM; 151 } 152 memcpy(collection, parser->device->collection, 153 sizeof(struct hid_collection) * 154 parser->device->collection_size); 155 memset(collection + parser->device->collection_size, 0, 156 sizeof(struct hid_collection) * 157 parser->device->collection_size); 158 kfree(parser->device->collection); 159 parser->device->collection = collection; 160 parser->device->collection_size *= 2; 161 } 162 163 parser->collection_stack[parser->collection_stack_ptr++] = 164 parser->device->maxcollection; 165 166 collection_index = parser->device->maxcollection++; 167 collection = parser->device->collection + collection_index; 168 collection->type = type; 169 collection->usage = usage; 170 collection->level = parser->collection_stack_ptr - 1; 171 collection->parent_idx = (collection->level == 0) ? -1 : 172 parser->collection_stack[collection->level - 1]; 173 174 if (type == HID_COLLECTION_APPLICATION) 175 parser->device->maxapplication++; 176 177 return 0; 178 } 179 180 /* 181 * Close a collection. 182 */ 183 184 static int close_collection(struct hid_parser *parser) 185 { 186 if (!parser->collection_stack_ptr) { 187 hid_err(parser->device, "collection stack underflow\n"); 188 return -EINVAL; 189 } 190 parser->collection_stack_ptr--; 191 return 0; 192 } 193 194 /* 195 * Climb up the stack, search for the specified collection type 196 * and return the usage. 197 */ 198 199 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) 200 { 201 struct hid_collection *collection = parser->device->collection; 202 int n; 203 204 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) { 205 unsigned index = parser->collection_stack[n]; 206 if (collection[index].type == type) 207 return collection[index].usage; 208 } 209 return 0; /* we know nothing about this usage type */ 210 } 211 212 /* 213 * Concatenate usage which defines 16 bits or less with the 214 * currently defined usage page to form a 32 bit usage 215 */ 216 217 static void complete_usage(struct hid_parser *parser, unsigned int index) 218 { 219 parser->local.usage[index] &= 0xFFFF; 220 parser->local.usage[index] |= 221 (parser->global.usage_page & 0xFFFF) << 16; 222 } 223 224 /* 225 * Add a usage to the temporary parser table. 
226 */ 227 228 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) 229 { 230 if (parser->local.usage_index >= HID_MAX_USAGES) { 231 hid_err(parser->device, "usage index exceeded\n"); 232 return -1; 233 } 234 parser->local.usage[parser->local.usage_index] = usage; 235 236 /* 237 * If Usage item only includes usage id, concatenate it with 238 * currently defined usage page 239 */ 240 if (size <= 2) 241 complete_usage(parser, parser->local.usage_index); 242 243 parser->local.usage_size[parser->local.usage_index] = size; 244 parser->local.collection_index[parser->local.usage_index] = 245 parser->collection_stack_ptr ? 246 parser->collection_stack[parser->collection_stack_ptr - 1] : 0; 247 parser->local.usage_index++; 248 return 0; 249 } 250 251 /* 252 * Register a new field for this report. 253 */ 254 255 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags) 256 { 257 struct hid_report *report; 258 struct hid_field *field; 259 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; 260 unsigned int usages; 261 unsigned int offset; 262 unsigned int i; 263 unsigned int application; 264 265 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 266 267 report = hid_register_report(parser->device, report_type, 268 parser->global.report_id, application); 269 if (!report) { 270 hid_err(parser->device, "hid_register_report failed\n"); 271 return -1; 272 } 273 274 /* Handle both signed and unsigned cases properly */ 275 if ((parser->global.logical_minimum < 0 && 276 parser->global.logical_maximum < 277 parser->global.logical_minimum) || 278 (parser->global.logical_minimum >= 0 && 279 (__u32)parser->global.logical_maximum < 280 (__u32)parser->global.logical_minimum)) { 281 dbg_hid("logical range invalid 0x%x 0x%x\n", 282 parser->global.logical_minimum, 283 parser->global.logical_maximum); 284 return -1; 285 } 286 287 offset = report->size; 288 report->size += parser->global.report_size * parser->global.report_count; 289 290 if (parser->device->ll_driver->max_buffer_size) 291 max_buffer_size = parser->device->ll_driver->max_buffer_size; 292 293 /* Total size check: Allow for possible report index byte */ 294 if (report->size > (max_buffer_size - 1) << 3) { 295 hid_err(parser->device, "report is too long\n"); 296 return -1; 297 } 298 299 if (!parser->local.usage_index) /* Ignore padding fields */ 300 return 0; 301 302 usages = max_t(unsigned, parser->local.usage_index, 303 parser->global.report_count); 304 305 field = hid_register_field(report, usages); 306 if (!field) 307 return 0; 308 309 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL); 310 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL); 311 field->application = application; 312 313 for (i = 0; i < usages; i++) { 314 unsigned j = i; 315 /* Duplicate the last usage we parsed if we have excess values */ 316 if (i >= parser->local.usage_index) 317 j = parser->local.usage_index - 1; 318 field->usage[i].hid = parser->local.usage[j]; 319 field->usage[i].collection_index = 320 parser->local.collection_index[j]; 321 field->usage[i].usage_index = i; 322 field->usage[i].resolution_multiplier = 1; 323 } 324 325 field->maxusage = usages; 326 field->flags = flags; 327 field->report_offset = offset; 328 field->report_type = report_type; 329 field->report_size = parser->global.report_size; 330 field->report_count = parser->global.report_count; 331 field->logical_minimum = parser->global.logical_minimum; 332 field->logical_maximum = 
parser->global.logical_maximum; 333 field->physical_minimum = parser->global.physical_minimum; 334 field->physical_maximum = parser->global.physical_maximum; 335 field->unit_exponent = parser->global.unit_exponent; 336 field->unit = parser->global.unit; 337 338 return 0; 339 } 340 341 /* 342 * Read data value from item. 343 */ 344 345 static u32 item_udata(struct hid_item *item) 346 { 347 switch (item->size) { 348 case 1: return item->data.u8; 349 case 2: return item->data.u16; 350 case 4: return item->data.u32; 351 } 352 return 0; 353 } 354 355 static s32 item_sdata(struct hid_item *item) 356 { 357 switch (item->size) { 358 case 1: return item->data.s8; 359 case 2: return item->data.s16; 360 case 4: return item->data.s32; 361 } 362 return 0; 363 } 364 365 /* 366 * Process a global item. 367 */ 368 369 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 370 { 371 __s32 raw_value; 372 switch (item->tag) { 373 case HID_GLOBAL_ITEM_TAG_PUSH: 374 375 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { 376 hid_err(parser->device, "global environment stack overflow\n"); 377 return -1; 378 } 379 380 memcpy(parser->global_stack + parser->global_stack_ptr++, 381 &parser->global, sizeof(struct hid_global)); 382 return 0; 383 384 case HID_GLOBAL_ITEM_TAG_POP: 385 386 if (!parser->global_stack_ptr) { 387 hid_err(parser->device, "global environment stack underflow\n"); 388 return -1; 389 } 390 391 memcpy(&parser->global, parser->global_stack + 392 --parser->global_stack_ptr, sizeof(struct hid_global)); 393 return 0; 394 395 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE: 396 parser->global.usage_page = item_udata(item); 397 return 0; 398 399 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM: 400 parser->global.logical_minimum = item_sdata(item); 401 return 0; 402 403 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM: 404 if (parser->global.logical_minimum < 0) 405 parser->global.logical_maximum = item_sdata(item); 406 else 407 parser->global.logical_maximum = item_udata(item); 408 return 0; 409 410 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM: 411 parser->global.physical_minimum = item_sdata(item); 412 return 0; 413 414 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM: 415 if (parser->global.physical_minimum < 0) 416 parser->global.physical_maximum = item_sdata(item); 417 else 418 parser->global.physical_maximum = item_udata(item); 419 return 0; 420 421 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 422 /* Many devices provide unit exponent as a two's complement 423 * nibble due to the common misunderstanding of HID 424 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle 425 * both this and the standard encoding. 
*/ 426 raw_value = item_sdata(item); 427 if (!(raw_value & 0xfffffff0)) 428 parser->global.unit_exponent = hid_snto32(raw_value, 4); 429 else 430 parser->global.unit_exponent = raw_value; 431 return 0; 432 433 case HID_GLOBAL_ITEM_TAG_UNIT: 434 parser->global.unit = item_udata(item); 435 return 0; 436 437 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: 438 parser->global.report_size = item_udata(item); 439 if (parser->global.report_size > 256) { 440 hid_err(parser->device, "invalid report_size %d\n", 441 parser->global.report_size); 442 return -1; 443 } 444 return 0; 445 446 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT: 447 parser->global.report_count = item_udata(item); 448 if (parser->global.report_count > HID_MAX_USAGES) { 449 hid_err(parser->device, "invalid report_count %d\n", 450 parser->global.report_count); 451 return -1; 452 } 453 return 0; 454 455 case HID_GLOBAL_ITEM_TAG_REPORT_ID: 456 parser->global.report_id = item_udata(item); 457 if (parser->global.report_id == 0 || 458 parser->global.report_id >= HID_MAX_IDS) { 459 hid_err(parser->device, "report_id %u is invalid\n", 460 parser->global.report_id); 461 return -1; 462 } 463 return 0; 464 465 default: 466 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag); 467 return -1; 468 } 469 } 470 471 /* 472 * Process a local item. 473 */ 474 475 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) 476 { 477 __u32 data; 478 unsigned n; 479 __u32 count; 480 481 data = item_udata(item); 482 483 switch (item->tag) { 484 case HID_LOCAL_ITEM_TAG_DELIMITER: 485 486 if (data) { 487 /* 488 * We treat items before the first delimiter 489 * as global to all usage sets (branch 0). 490 * In the moment we process only these global 491 * items and the first delimiter set. 492 */ 493 if (parser->local.delimiter_depth != 0) { 494 hid_err(parser->device, "nested delimiters\n"); 495 return -1; 496 } 497 parser->local.delimiter_depth++; 498 parser->local.delimiter_branch++; 499 } else { 500 if (parser->local.delimiter_depth < 1) { 501 hid_err(parser->device, "bogus close delimiter\n"); 502 return -1; 503 } 504 parser->local.delimiter_depth--; 505 } 506 return 0; 507 508 case HID_LOCAL_ITEM_TAG_USAGE: 509 510 if (parser->local.delimiter_branch > 1) { 511 dbg_hid("alternative usage ignored\n"); 512 return 0; 513 } 514 515 return hid_add_usage(parser, data, item->size); 516 517 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: 518 519 if (parser->local.delimiter_branch > 1) { 520 dbg_hid("alternative usage ignored\n"); 521 return 0; 522 } 523 524 parser->local.usage_minimum = data; 525 return 0; 526 527 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM: 528 529 if (parser->local.delimiter_branch > 1) { 530 dbg_hid("alternative usage ignored\n"); 531 return 0; 532 } 533 534 count = data - parser->local.usage_minimum; 535 if (count + parser->local.usage_index >= HID_MAX_USAGES) { 536 /* 537 * We do not warn if the name is not set, we are 538 * actually pre-scanning the device. 
539 */ 540 if (dev_name(&parser->device->dev)) 541 hid_warn(parser->device, 542 "ignoring exceeding usage max\n"); 543 data = HID_MAX_USAGES - parser->local.usage_index + 544 parser->local.usage_minimum - 1; 545 if (data <= 0) { 546 hid_err(parser->device, 547 "no more usage index available\n"); 548 return -1; 549 } 550 } 551 552 for (n = parser->local.usage_minimum; n <= data; n++) 553 if (hid_add_usage(parser, n, item->size)) { 554 dbg_hid("hid_add_usage failed\n"); 555 return -1; 556 } 557 return 0; 558 559 default: 560 561 dbg_hid("unknown local item tag 0x%x\n", item->tag); 562 return 0; 563 } 564 return 0; 565 } 566 567 /* 568 * Concatenate Usage Pages into Usages where relevant: 569 * As per specification, 6.2.2.8: "When the parser encounters a main item it 570 * concatenates the last declared Usage Page with a Usage to form a complete 571 * usage value." 572 */ 573 574 static void hid_concatenate_last_usage_page(struct hid_parser *parser) 575 { 576 int i; 577 unsigned int usage_page; 578 unsigned int current_page; 579 580 if (!parser->local.usage_index) 581 return; 582 583 usage_page = parser->global.usage_page; 584 585 /* 586 * Concatenate usage page again only if last declared Usage Page 587 * has not been already used in previous usages concatenation 588 */ 589 for (i = parser->local.usage_index - 1; i >= 0; i--) { 590 if (parser->local.usage_size[i] > 2) 591 /* Ignore extended usages */ 592 continue; 593 594 current_page = parser->local.usage[i] >> 16; 595 if (current_page == usage_page) 596 break; 597 598 complete_usage(parser, i); 599 } 600 } 601 602 /* 603 * Process a main item. 604 */ 605 606 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) 607 { 608 __u32 data; 609 int ret; 610 611 hid_concatenate_last_usage_page(parser); 612 613 data = item_udata(item); 614 615 switch (item->tag) { 616 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 617 ret = open_collection(parser, data & 0xff); 618 break; 619 case HID_MAIN_ITEM_TAG_END_COLLECTION: 620 ret = close_collection(parser); 621 break; 622 case HID_MAIN_ITEM_TAG_INPUT: 623 ret = hid_add_field(parser, HID_INPUT_REPORT, data); 624 break; 625 case HID_MAIN_ITEM_TAG_OUTPUT: 626 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data); 627 break; 628 case HID_MAIN_ITEM_TAG_FEATURE: 629 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 630 break; 631 default: 632 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag); 633 ret = 0; 634 } 635 636 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */ 637 638 return ret; 639 } 640 641 /* 642 * Process a reserved item. 643 */ 644 645 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item) 646 { 647 dbg_hid("reserved item type, tag 0x%x\n", item->tag); 648 return 0; 649 } 650 651 /* 652 * Free a report and all registered fields. The field->usage and 653 * field->value table's are allocated behind the field, so we need 654 * only to free(field) itself. 655 */ 656 657 static void hid_free_report(struct hid_report *report) 658 { 659 unsigned n; 660 661 kfree(report->field_entries); 662 663 for (n = 0; n < report->maxfield; n++) 664 kfree(report->field[n]); 665 kfree(report); 666 } 667 668 /* 669 * Close report. This function returns the device 670 * state to the point prior to hid_open_report(). 
671 */ 672 static void hid_close_report(struct hid_device *device) 673 { 674 unsigned i, j; 675 676 for (i = 0; i < HID_REPORT_TYPES; i++) { 677 struct hid_report_enum *report_enum = device->report_enum + i; 678 679 for (j = 0; j < HID_MAX_IDS; j++) { 680 struct hid_report *report = report_enum->report_id_hash[j]; 681 if (report) 682 hid_free_report(report); 683 } 684 memset(report_enum, 0, sizeof(*report_enum)); 685 INIT_LIST_HEAD(&report_enum->report_list); 686 } 687 688 kfree(device->rdesc); 689 device->rdesc = NULL; 690 device->rsize = 0; 691 692 kfree(device->collection); 693 device->collection = NULL; 694 device->collection_size = 0; 695 device->maxcollection = 0; 696 device->maxapplication = 0; 697 698 device->status &= ~HID_STAT_PARSED; 699 } 700 701 /* 702 * Free a device structure, all reports, and all fields. 703 */ 704 705 static void hid_device_release(struct device *dev) 706 { 707 struct hid_device *hid = to_hid_device(dev); 708 709 hid_close_report(hid); 710 kfree(hid->dev_rdesc); 711 kfree(hid); 712 } 713 714 /* 715 * Fetch a report description item from the data stream. We support long 716 * items, though they are not used yet. 717 */ 718 719 static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) 720 { 721 u8 b; 722 723 if ((end - start) <= 0) 724 return NULL; 725 726 b = *start++; 727 728 item->type = (b >> 2) & 3; 729 item->tag = (b >> 4) & 15; 730 731 if (item->tag == HID_ITEM_TAG_LONG) { 732 733 item->format = HID_ITEM_FORMAT_LONG; 734 735 if ((end - start) < 2) 736 return NULL; 737 738 item->size = *start++; 739 item->tag = *start++; 740 741 if ((end - start) < item->size) 742 return NULL; 743 744 item->data.longdata = start; 745 start += item->size; 746 return start; 747 } 748 749 item->format = HID_ITEM_FORMAT_SHORT; 750 item->size = b & 3; 751 752 switch (item->size) { 753 case 0: 754 return start; 755 756 case 1: 757 if ((end - start) < 1) 758 return NULL; 759 item->data.u8 = *start++; 760 return start; 761 762 case 2: 763 if ((end - start) < 2) 764 return NULL; 765 item->data.u16 = get_unaligned_le16(start); 766 start = (__u8 *)((__le16 *)start + 1); 767 return start; 768 769 case 3: 770 item->size++; 771 if ((end - start) < 4) 772 return NULL; 773 item->data.u32 = get_unaligned_le32(start); 774 start = (__u8 *)((__le32 *)start + 1); 775 return start; 776 } 777 778 return NULL; 779 } 780 781 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage) 782 { 783 struct hid_device *hid = parser->device; 784 785 if (usage == HID_DG_CONTACTID) 786 hid->group = HID_GROUP_MULTITOUCH; 787 } 788 789 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) 790 { 791 if (usage == 0xff0000c5 && parser->global.report_count == 256 && 792 parser->global.report_size == 8) 793 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 794 795 if (usage == 0xff0000c6 && parser->global.report_count == 1 && 796 parser->global.report_size == 8) 797 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 798 } 799 800 static void hid_scan_collection(struct hid_parser *parser, unsigned type) 801 { 802 struct hid_device *hid = parser->device; 803 int i; 804 805 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && 806 (type == HID_COLLECTION_PHYSICAL || 807 type == HID_COLLECTION_APPLICATION)) 808 hid->group = HID_GROUP_SENSOR_HUB; 809 810 if (hid->vendor == USB_VENDOR_ID_MICROSOFT && 811 hid->product == USB_DEVICE_ID_MS_POWER_COVER && 812 hid->group == HID_GROUP_MULTITOUCH) 813 hid->group = HID_GROUP_GENERIC; 814 815 if ((parser->global.usage_page << 16) == 
HID_UP_GENDESK) 816 for (i = 0; i < parser->local.usage_index; i++) 817 if (parser->local.usage[i] == HID_GD_POINTER) 818 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER; 819 820 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR) 821 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC; 822 823 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR) 824 for (i = 0; i < parser->local.usage_index; i++) 825 if (parser->local.usage[i] == 826 (HID_UP_GOOGLEVENDOR | 0x0001)) 827 parser->device->group = 828 HID_GROUP_VIVALDI; 829 } 830 831 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) 832 { 833 __u32 data; 834 int i; 835 836 hid_concatenate_last_usage_page(parser); 837 838 data = item_udata(item); 839 840 switch (item->tag) { 841 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 842 hid_scan_collection(parser, data & 0xff); 843 break; 844 case HID_MAIN_ITEM_TAG_END_COLLECTION: 845 break; 846 case HID_MAIN_ITEM_TAG_INPUT: 847 /* ignore constant inputs, they will be ignored by hid-input */ 848 if (data & HID_MAIN_ITEM_CONSTANT) 849 break; 850 for (i = 0; i < parser->local.usage_index; i++) 851 hid_scan_input_usage(parser, parser->local.usage[i]); 852 break; 853 case HID_MAIN_ITEM_TAG_OUTPUT: 854 break; 855 case HID_MAIN_ITEM_TAG_FEATURE: 856 for (i = 0; i < parser->local.usage_index; i++) 857 hid_scan_feature_usage(parser, parser->local.usage[i]); 858 break; 859 } 860 861 /* Reset the local parser environment */ 862 memset(&parser->local, 0, sizeof(parser->local)); 863 864 return 0; 865 } 866 867 /* 868 * Scan a report descriptor before the device is added to the bus. 869 * Sets device groups and other properties that determine what driver 870 * to load. 871 */ 872 static int hid_scan_report(struct hid_device *hid) 873 { 874 struct hid_parser *parser; 875 struct hid_item item; 876 __u8 *start = hid->dev_rdesc; 877 __u8 *end = start + hid->dev_rsize; 878 static int (*dispatch_type[])(struct hid_parser *parser, 879 struct hid_item *item) = { 880 hid_scan_main, 881 hid_parser_global, 882 hid_parser_local, 883 hid_parser_reserved 884 }; 885 886 parser = vzalloc(sizeof(struct hid_parser)); 887 if (!parser) 888 return -ENOMEM; 889 890 parser->device = hid; 891 hid->group = HID_GROUP_GENERIC; 892 893 /* 894 * The parsing is simpler than the one in hid_open_report() as we should 895 * be robust against hid errors. Those errors will be raised by 896 * hid_open_report() anyway. 897 */ 898 while ((start = fetch_item(start, end, &item)) != NULL) 899 dispatch_type[item.type](parser, &item); 900 901 /* 902 * Handle special flags set during scanning. 903 */ 904 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) && 905 (hid->group == HID_GROUP_MULTITOUCH)) 906 hid->group = HID_GROUP_MULTITOUCH_WIN_8; 907 908 /* 909 * Vendor specific handlings 910 */ 911 switch (hid->vendor) { 912 case USB_VENDOR_ID_WACOM: 913 hid->group = HID_GROUP_WACOM; 914 break; 915 case USB_VENDOR_ID_SYNAPTICS: 916 if (hid->group == HID_GROUP_GENERIC) 917 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 918 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 919 /* 920 * hid-rmi should take care of them, 921 * not hid-generic 922 */ 923 hid->group = HID_GROUP_RMI; 924 break; 925 } 926 927 kfree(parser->collection_stack); 928 vfree(parser); 929 return 0; 930 } 931 932 /** 933 * hid_parse_report - parse device report 934 * 935 * @hid: hid device 936 * @start: report start 937 * @size: report size 938 * 939 * Allocate the device report as read by the bus driver. 
This function should 940 * only be called from parse() in ll drivers. 941 */ 942 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size) 943 { 944 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); 945 if (!hid->dev_rdesc) 946 return -ENOMEM; 947 hid->dev_rsize = size; 948 return 0; 949 } 950 EXPORT_SYMBOL_GPL(hid_parse_report); 951 952 static const char * const hid_report_names[] = { 953 "HID_INPUT_REPORT", 954 "HID_OUTPUT_REPORT", 955 "HID_FEATURE_REPORT", 956 }; 957 /** 958 * hid_validate_values - validate existing device report's value indexes 959 * 960 * @hid: hid device 961 * @type: which report type to examine 962 * @id: which report ID to examine (0 for first) 963 * @field_index: which report field to examine 964 * @report_counts: expected number of values 965 * 966 * Validate the number of values in a given field of a given report, after 967 * parsing. 968 */ 969 struct hid_report *hid_validate_values(struct hid_device *hid, 970 enum hid_report_type type, unsigned int id, 971 unsigned int field_index, 972 unsigned int report_counts) 973 { 974 struct hid_report *report; 975 976 if (type > HID_FEATURE_REPORT) { 977 hid_err(hid, "invalid HID report type %u\n", type); 978 return NULL; 979 } 980 981 if (id >= HID_MAX_IDS) { 982 hid_err(hid, "invalid HID report id %u\n", id); 983 return NULL; 984 } 985 986 /* 987 * Explicitly not using hid_get_report() here since it depends on 988 * ->numbered being checked, which may not always be the case when 989 * drivers go to access report values. 990 */ 991 if (id == 0) { 992 /* 993 * Validating on id 0 means we should examine the first 994 * report in the list. 995 */ 996 report = list_first_entry_or_null( 997 &hid->report_enum[type].report_list, 998 struct hid_report, list); 999 } else { 1000 report = hid->report_enum[type].report_id_hash[id]; 1001 } 1002 if (!report) { 1003 hid_err(hid, "missing %s %u\n", hid_report_names[type], id); 1004 return NULL; 1005 } 1006 if (report->maxfield <= field_index) { 1007 hid_err(hid, "not enough fields in %s %u\n", 1008 hid_report_names[type], id); 1009 return NULL; 1010 } 1011 if (report->field[field_index]->report_count < report_counts) { 1012 hid_err(hid, "not enough values in %s %u field %u\n", 1013 hid_report_names[type], id, field_index); 1014 return NULL; 1015 } 1016 return report; 1017 } 1018 EXPORT_SYMBOL_GPL(hid_validate_values); 1019 1020 static int hid_calculate_multiplier(struct hid_device *hid, 1021 struct hid_field *multiplier) 1022 { 1023 int m; 1024 __s32 v = *multiplier->value; 1025 __s32 lmin = multiplier->logical_minimum; 1026 __s32 lmax = multiplier->logical_maximum; 1027 __s32 pmin = multiplier->physical_minimum; 1028 __s32 pmax = multiplier->physical_maximum; 1029 1030 /* 1031 * "Because OS implementations will generally divide the control's 1032 * reported count by the Effective Resolution Multiplier, designers 1033 * should take care not to establish a potential Effective 1034 * Resolution Multiplier of zero." 1035 * HID Usage Table, v1.12, Section 4.3.1, p31 1036 */ 1037 if (lmax - lmin == 0) 1038 return 1; 1039 /* 1040 * Handling the unit exponent is left as an exercise to whoever 1041 * finds a device where that exponent is not 0. 
1042 */ 1043 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin); 1044 if (unlikely(multiplier->unit_exponent != 0)) { 1045 hid_warn(hid, 1046 "unsupported Resolution Multiplier unit exponent %d\n", 1047 multiplier->unit_exponent); 1048 } 1049 1050 /* There are no devices with an effective multiplier > 255 */ 1051 if (unlikely(m == 0 || m > 255 || m < -255)) { 1052 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m); 1053 m = 1; 1054 } 1055 1056 return m; 1057 } 1058 1059 static void hid_apply_multiplier_to_field(struct hid_device *hid, 1060 struct hid_field *field, 1061 struct hid_collection *multiplier_collection, 1062 int effective_multiplier) 1063 { 1064 struct hid_collection *collection; 1065 struct hid_usage *usage; 1066 int i; 1067 1068 /* 1069 * If multiplier_collection is NULL, the multiplier applies 1070 * to all fields in the report. 1071 * Otherwise, it is the Logical Collection the multiplier applies to 1072 * but our field may be in a subcollection of that collection. 1073 */ 1074 for (i = 0; i < field->maxusage; i++) { 1075 usage = &field->usage[i]; 1076 1077 collection = &hid->collection[usage->collection_index]; 1078 while (collection->parent_idx != -1 && 1079 collection != multiplier_collection) 1080 collection = &hid->collection[collection->parent_idx]; 1081 1082 if (collection->parent_idx != -1 || 1083 multiplier_collection == NULL) 1084 usage->resolution_multiplier = effective_multiplier; 1085 1086 } 1087 } 1088 1089 static void hid_apply_multiplier(struct hid_device *hid, 1090 struct hid_field *multiplier) 1091 { 1092 struct hid_report_enum *rep_enum; 1093 struct hid_report *rep; 1094 struct hid_field *field; 1095 struct hid_collection *multiplier_collection; 1096 int effective_multiplier; 1097 int i; 1098 1099 /* 1100 * "The Resolution Multiplier control must be contained in the same 1101 * Logical Collection as the control(s) to which it is to be applied. 1102 * If no Resolution Multiplier is defined, then the Resolution 1103 * Multiplier defaults to 1. If more than one control exists in a 1104 * Logical Collection, the Resolution Multiplier is associated with 1105 * all controls in the collection. If no Logical Collection is 1106 * defined, the Resolution Multiplier is associated with all 1107 * controls in the report." 1108 * HID Usage Table, v1.12, Section 4.3.1, p30 1109 * 1110 * Thus, search from the current collection upwards until we find a 1111 * logical collection. Then search all fields for that same parent 1112 * collection. Those are the fields the multiplier applies to. 1113 * 1114 * If we have more than one multiplier, it will overwrite the 1115 * applicable fields later. 
1116 */ 1117 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1118 while (multiplier_collection->parent_idx != -1 && 1119 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1120 multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; 1121 1122 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1123 1124 rep_enum = &hid->report_enum[HID_INPUT_REPORT]; 1125 list_for_each_entry(rep, &rep_enum->report_list, list) { 1126 for (i = 0; i < rep->maxfield; i++) { 1127 field = rep->field[i]; 1128 hid_apply_multiplier_to_field(hid, field, 1129 multiplier_collection, 1130 effective_multiplier); 1131 } 1132 } 1133 } 1134 1135 /* 1136 * hid_setup_resolution_multiplier - set up all resolution multipliers 1137 * 1138 * @device: hid device 1139 * 1140 * Search for all Resolution Multiplier Feature Reports and apply their 1141 * value to all matching Input items. This only updates the internal struct 1142 * fields. 1143 * 1144 * The Resolution Multiplier is applied by the hardware. If the multiplier 1145 * is anything other than 1, the hardware will send pre-multiplied events 1146 * so that the same physical interaction generates an accumulated 1147 * accumulated_value = value * * multiplier 1148 * This may be achieved by sending 1149 * - "value * multiplier" for each event, or 1150 * - "value" but "multiplier" times as frequently, or 1151 * - a combination of the above 1152 * The only guarantee is that the same physical interaction always generates 1153 * an accumulated 'value * multiplier'. 1154 * 1155 * This function must be called before any event processing and after 1156 * any SetRequest to the Resolution Multiplier. 1157 */ 1158 void hid_setup_resolution_multiplier(struct hid_device *hid) 1159 { 1160 struct hid_report_enum *rep_enum; 1161 struct hid_report *rep; 1162 struct hid_usage *usage; 1163 int i, j; 1164 1165 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1166 list_for_each_entry(rep, &rep_enum->report_list, list) { 1167 for (i = 0; i < rep->maxfield; i++) { 1168 /* Ignore if report count is out of bounds. */ 1169 if (rep->field[i]->report_count < 1) 1170 continue; 1171 1172 for (j = 0; j < rep->field[i]->maxusage; j++) { 1173 usage = &rep->field[i]->usage[j]; 1174 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) 1175 hid_apply_multiplier(hid, 1176 rep->field[i]); 1177 } 1178 } 1179 } 1180 } 1181 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier); 1182 1183 /** 1184 * hid_open_report - open a driver-specific device report 1185 * 1186 * @device: hid device 1187 * 1188 * Parse a report description into a hid_device structure. Reports are 1189 * enumerated, fields are attached to these reports. 1190 * 0 returned on success, otherwise nonzero error value. 1191 * 1192 * This function (or the equivalent hid_parse() macro) should only be 1193 * called from probe() in drivers, before starting the device. 
1194 */ 1195 int hid_open_report(struct hid_device *device) 1196 { 1197 struct hid_parser *parser; 1198 struct hid_item item; 1199 unsigned int size; 1200 __u8 *start; 1201 __u8 *buf; 1202 __u8 *end; 1203 __u8 *next; 1204 int ret; 1205 int i; 1206 static int (*dispatch_type[])(struct hid_parser *parser, 1207 struct hid_item *item) = { 1208 hid_parser_main, 1209 hid_parser_global, 1210 hid_parser_local, 1211 hid_parser_reserved 1212 }; 1213 1214 if (WARN_ON(device->status & HID_STAT_PARSED)) 1215 return -EBUSY; 1216 1217 start = device->dev_rdesc; 1218 if (WARN_ON(!start)) 1219 return -ENODEV; 1220 size = device->dev_rsize; 1221 1222 /* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */ 1223 buf = call_hid_bpf_rdesc_fixup(device, start, &size); 1224 if (buf == NULL) 1225 return -ENOMEM; 1226 1227 if (device->driver->report_fixup) 1228 start = device->driver->report_fixup(device, buf, &size); 1229 else 1230 start = buf; 1231 1232 start = kmemdup(start, size, GFP_KERNEL); 1233 kfree(buf); 1234 if (start == NULL) 1235 return -ENOMEM; 1236 1237 device->rdesc = start; 1238 device->rsize = size; 1239 1240 parser = vzalloc(sizeof(struct hid_parser)); 1241 if (!parser) { 1242 ret = -ENOMEM; 1243 goto alloc_err; 1244 } 1245 1246 parser->device = device; 1247 1248 end = start + size; 1249 1250 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS, 1251 sizeof(struct hid_collection), GFP_KERNEL); 1252 if (!device->collection) { 1253 ret = -ENOMEM; 1254 goto err; 1255 } 1256 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; 1257 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++) 1258 device->collection[i].parent_idx = -1; 1259 1260 ret = -EINVAL; 1261 while ((next = fetch_item(start, end, &item)) != NULL) { 1262 start = next; 1263 1264 if (item.format != HID_ITEM_FORMAT_SHORT) { 1265 hid_err(device, "unexpected long global item\n"); 1266 goto err; 1267 } 1268 1269 if (dispatch_type[item.type](parser, &item)) { 1270 hid_err(device, "item %u %u %u %u parsing failed\n", 1271 item.format, (unsigned)item.size, 1272 (unsigned)item.type, (unsigned)item.tag); 1273 goto err; 1274 } 1275 1276 if (start == end) { 1277 if (parser->collection_stack_ptr) { 1278 hid_err(device, "unbalanced collection at end of report description\n"); 1279 goto err; 1280 } 1281 if (parser->local.delimiter_depth) { 1282 hid_err(device, "unbalanced delimiter at end of report description\n"); 1283 goto err; 1284 } 1285 1286 /* 1287 * fetch initial values in case the device's 1288 * default multiplier isn't the recommended 1 1289 */ 1290 hid_setup_resolution_multiplier(device); 1291 1292 kfree(parser->collection_stack); 1293 vfree(parser); 1294 device->status |= HID_STAT_PARSED; 1295 1296 return 0; 1297 } 1298 } 1299 1300 hid_err(device, "item fetching failed at offset %u/%u\n", 1301 size - (unsigned int)(end - start), size); 1302 err: 1303 kfree(parser->collection_stack); 1304 alloc_err: 1305 vfree(parser); 1306 hid_close_report(device); 1307 return ret; 1308 } 1309 EXPORT_SYMBOL_GPL(hid_open_report); 1310 1311 /* 1312 * Convert a signed n-bit integer to signed 32-bit integer. Common 1313 * cases are done through the compiler, the screwed things has to be 1314 * done by hand. 1315 */ 1316 1317 static s32 snto32(__u32 value, unsigned n) 1318 { 1319 if (!value || !n) 1320 return 0; 1321 1322 if (n > 32) 1323 n = 32; 1324 1325 switch (n) { 1326 case 8: return ((__s8)value); 1327 case 16: return ((__s16)value); 1328 case 32: return ((__s32)value); 1329 } 1330 return value & (1 << (n - 1)) ? 
value | (~0U << n) : value; 1331 } 1332 1333 s32 hid_snto32(__u32 value, unsigned n) 1334 { 1335 return snto32(value, n); 1336 } 1337 EXPORT_SYMBOL_GPL(hid_snto32); 1338 1339 /* 1340 * Convert a signed 32-bit integer to a signed n-bit integer. 1341 */ 1342 1343 static u32 s32ton(__s32 value, unsigned n) 1344 { 1345 s32 a = value >> (n - 1); 1346 if (a && a != -1) 1347 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1; 1348 return value & ((1 << n) - 1); 1349 } 1350 1351 /* 1352 * Extract/implement a data field from/to a little endian report (bit array). 1353 * 1354 * Code sort-of follows HID spec: 1355 * http://www.usb.org/developers/hidpage/HID1_11.pdf 1356 * 1357 * While the USB HID spec allows unlimited length bit fields in "report 1358 * descriptors", most devices never use more than 16 bits. 1359 * One model of UPS is claimed to report "LINEV" as a 32-bit field. 1360 * Search linux-kernel and linux-usb-devel archives for "hid-core extract". 1361 */ 1362 1363 static u32 __extract(u8 *report, unsigned offset, int n) 1364 { 1365 unsigned int idx = offset / 8; 1366 unsigned int bit_nr = 0; 1367 unsigned int bit_shift = offset % 8; 1368 int bits_to_copy = 8 - bit_shift; 1369 u32 value = 0; 1370 u32 mask = n < 32 ? (1U << n) - 1 : ~0U; 1371 1372 while (n > 0) { 1373 value |= ((u32)report[idx] >> bit_shift) << bit_nr; 1374 n -= bits_to_copy; 1375 bit_nr += bits_to_copy; 1376 bits_to_copy = 8; 1377 bit_shift = 0; 1378 idx++; 1379 } 1380 1381 return value & mask; 1382 } 1383 1384 u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1385 unsigned offset, unsigned n) 1386 { 1387 if (n > 32) { 1388 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n", 1389 __func__, n, current->comm); 1390 n = 32; 1391 } 1392 1393 return __extract(report, offset, n); 1394 } 1395 EXPORT_SYMBOL_GPL(hid_field_extract); 1396 1397 /* 1398 * "implement" : set bits in a little endian bit stream. 1399 * Same concepts as "extract" (see comments above). 1400 * The data mangled in the bit stream remains in little endian 1401 * order the whole time. It make more sense to talk about 1402 * endianness of register values by considering a register 1403 * a "cached" copy of the little endian bit stream. 1404 */ 1405 1406 static void __implement(u8 *report, unsigned offset, int n, u32 value) 1407 { 1408 unsigned int idx = offset / 8; 1409 unsigned int bit_shift = offset % 8; 1410 int bits_to_set = 8 - bit_shift; 1411 1412 while (n - bits_to_set >= 0) { 1413 report[idx] &= ~(0xff << bit_shift); 1414 report[idx] |= value << bit_shift; 1415 value >>= bits_to_set; 1416 n -= bits_to_set; 1417 bits_to_set = 8; 1418 bit_shift = 0; 1419 idx++; 1420 } 1421 1422 /* last nibble */ 1423 if (n) { 1424 u8 bit_mask = ((1U << n) - 1); 1425 report[idx] &= ~(bit_mask << bit_shift); 1426 report[idx] |= value << bit_shift; 1427 } 1428 } 1429 1430 static void implement(const struct hid_device *hid, u8 *report, 1431 unsigned offset, unsigned n, u32 value) 1432 { 1433 if (unlikely(n > 32)) { 1434 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n", 1435 __func__, n, current->comm); 1436 n = 32; 1437 } else if (n < 32) { 1438 u32 m = (1U << n) - 1; 1439 1440 if (unlikely(value > m)) { 1441 hid_warn(hid, 1442 "%s() called with too large value %d (n: %d)! (%s)\n", 1443 __func__, value, n, current->comm); 1444 WARN_ON(1); 1445 value &= m; 1446 } 1447 } 1448 1449 __implement(report, offset, n, value); 1450 } 1451 1452 /* 1453 * Search an array for a value. 
1454 */ 1455 1456 static int search(__s32 *array, __s32 value, unsigned n) 1457 { 1458 while (n--) { 1459 if (*array++ == value) 1460 return 0; 1461 } 1462 return -1; 1463 } 1464 1465 /** 1466 * hid_match_report - check if driver's raw_event should be called 1467 * 1468 * @hid: hid device 1469 * @report: hid report to match against 1470 * 1471 * compare hid->driver->report_table->report_type to report->type 1472 */ 1473 static int hid_match_report(struct hid_device *hid, struct hid_report *report) 1474 { 1475 const struct hid_report_id *id = hid->driver->report_table; 1476 1477 if (!id) /* NULL means all */ 1478 return 1; 1479 1480 for (; id->report_type != HID_TERMINATOR; id++) 1481 if (id->report_type == HID_ANY_ID || 1482 id->report_type == report->type) 1483 return 1; 1484 return 0; 1485 } 1486 1487 /** 1488 * hid_match_usage - check if driver's event should be called 1489 * 1490 * @hid: hid device 1491 * @usage: usage to match against 1492 * 1493 * compare hid->driver->usage_table->usage_{type,code} to 1494 * usage->usage_{type,code} 1495 */ 1496 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage) 1497 { 1498 const struct hid_usage_id *id = hid->driver->usage_table; 1499 1500 if (!id) /* NULL means all */ 1501 return 1; 1502 1503 for (; id->usage_type != HID_ANY_ID - 1; id++) 1504 if ((id->usage_hid == HID_ANY_ID || 1505 id->usage_hid == usage->hid) && 1506 (id->usage_type == HID_ANY_ID || 1507 id->usage_type == usage->type) && 1508 (id->usage_code == HID_ANY_ID || 1509 id->usage_code == usage->code)) 1510 return 1; 1511 return 0; 1512 } 1513 1514 static void hid_process_event(struct hid_device *hid, struct hid_field *field, 1515 struct hid_usage *usage, __s32 value, int interrupt) 1516 { 1517 struct hid_driver *hdrv = hid->driver; 1518 int ret; 1519 1520 if (!list_empty(&hid->debug_list)) 1521 hid_dump_input(hid, usage, value); 1522 1523 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1524 ret = hdrv->event(hid, field, usage, value); 1525 if (ret != 0) { 1526 if (ret < 0) 1527 hid_err(hid, "%s's event failed with %d\n", 1528 hdrv->name, ret); 1529 return; 1530 } 1531 } 1532 1533 if (hid->claimed & HID_CLAIMED_INPUT) 1534 hidinput_hid_event(hid, field, usage, value); 1535 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event) 1536 hid->hiddev_hid_event(hid, field, usage, value); 1537 } 1538 1539 /* 1540 * Checks if the given value is valid within this field 1541 */ 1542 static inline int hid_array_value_is_valid(struct hid_field *field, 1543 __s32 value) 1544 { 1545 __s32 min = field->logical_minimum; 1546 1547 /* 1548 * Value needs to be between logical min and max, and 1549 * (value - min) is used as an index in the usage array. 1550 * This array is of size field->maxusage 1551 */ 1552 return value >= min && 1553 value <= field->logical_maximum && 1554 value - min < field->maxusage; 1555 } 1556 1557 /* 1558 * Fetch the field from the data. The field content is stored for next 1559 * report processing (we do differential reporting to the layer). 
1560 */ 1561 static void hid_input_fetch_field(struct hid_device *hid, 1562 struct hid_field *field, 1563 __u8 *data) 1564 { 1565 unsigned n; 1566 unsigned count = field->report_count; 1567 unsigned offset = field->report_offset; 1568 unsigned size = field->report_size; 1569 __s32 min = field->logical_minimum; 1570 __s32 *value; 1571 1572 value = field->new_value; 1573 memset(value, 0, count * sizeof(__s32)); 1574 field->ignored = false; 1575 1576 for (n = 0; n < count; n++) { 1577 1578 value[n] = min < 0 ? 1579 snto32(hid_field_extract(hid, data, offset + n * size, 1580 size), size) : 1581 hid_field_extract(hid, data, offset + n * size, size); 1582 1583 /* Ignore report if ErrorRollOver */ 1584 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && 1585 hid_array_value_is_valid(field, value[n]) && 1586 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) { 1587 field->ignored = true; 1588 return; 1589 } 1590 } 1591 } 1592 1593 /* 1594 * Process a received variable field. 1595 */ 1596 1597 static void hid_input_var_field(struct hid_device *hid, 1598 struct hid_field *field, 1599 int interrupt) 1600 { 1601 unsigned int count = field->report_count; 1602 __s32 *value = field->new_value; 1603 unsigned int n; 1604 1605 for (n = 0; n < count; n++) 1606 hid_process_event(hid, 1607 field, 1608 &field->usage[n], 1609 value[n], 1610 interrupt); 1611 1612 memcpy(field->value, value, count * sizeof(__s32)); 1613 } 1614 1615 /* 1616 * Process a received array field. The field content is stored for 1617 * next report processing (we do differential reporting to the layer). 1618 */ 1619 1620 static void hid_input_array_field(struct hid_device *hid, 1621 struct hid_field *field, 1622 int interrupt) 1623 { 1624 unsigned int n; 1625 unsigned int count = field->report_count; 1626 __s32 min = field->logical_minimum; 1627 __s32 *value; 1628 1629 value = field->new_value; 1630 1631 /* ErrorRollOver */ 1632 if (field->ignored) 1633 return; 1634 1635 for (n = 0; n < count; n++) { 1636 if (hid_array_value_is_valid(field, field->value[n]) && 1637 search(value, field->value[n], count)) 1638 hid_process_event(hid, 1639 field, 1640 &field->usage[field->value[n] - min], 1641 0, 1642 interrupt); 1643 1644 if (hid_array_value_is_valid(field, value[n]) && 1645 search(field->value, value[n], count)) 1646 hid_process_event(hid, 1647 field, 1648 &field->usage[value[n] - min], 1649 1, 1650 interrupt); 1651 } 1652 1653 memcpy(field->value, value, count * sizeof(__s32)); 1654 } 1655 1656 /* 1657 * Analyse a received report, and fetch the data from it. The field 1658 * content is stored for next report processing (we do differential 1659 * reporting to the layer). 
1660 */ 1661 static void hid_process_report(struct hid_device *hid, 1662 struct hid_report *report, 1663 __u8 *data, 1664 int interrupt) 1665 { 1666 unsigned int a; 1667 struct hid_field_entry *entry; 1668 struct hid_field *field; 1669 1670 /* first retrieve all incoming values in data */ 1671 for (a = 0; a < report->maxfield; a++) 1672 hid_input_fetch_field(hid, report->field[a], data); 1673 1674 if (!list_empty(&report->field_entry_list)) { 1675 /* INPUT_REPORT, we have a priority list of fields */ 1676 list_for_each_entry(entry, 1677 &report->field_entry_list, 1678 list) { 1679 field = entry->field; 1680 1681 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1682 hid_process_event(hid, 1683 field, 1684 &field->usage[entry->index], 1685 field->new_value[entry->index], 1686 interrupt); 1687 else 1688 hid_input_array_field(hid, field, interrupt); 1689 } 1690 1691 /* we need to do the memcpy at the end for var items */ 1692 for (a = 0; a < report->maxfield; a++) { 1693 field = report->field[a]; 1694 1695 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1696 memcpy(field->value, field->new_value, 1697 field->report_count * sizeof(__s32)); 1698 } 1699 } else { 1700 /* FEATURE_REPORT, regular processing */ 1701 for (a = 0; a < report->maxfield; a++) { 1702 field = report->field[a]; 1703 1704 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1705 hid_input_var_field(hid, field, interrupt); 1706 else 1707 hid_input_array_field(hid, field, interrupt); 1708 } 1709 } 1710 } 1711 1712 /* 1713 * Insert a given usage_index in a field in the list 1714 * of processed usages in the report. 1715 * 1716 * The elements of lower priority score are processed 1717 * first. 1718 */ 1719 static void __hid_insert_field_entry(struct hid_device *hid, 1720 struct hid_report *report, 1721 struct hid_field_entry *entry, 1722 struct hid_field *field, 1723 unsigned int usage_index) 1724 { 1725 struct hid_field_entry *next; 1726 1727 entry->field = field; 1728 entry->index = usage_index; 1729 entry->priority = field->usages_priorities[usage_index]; 1730 1731 /* insert the element at the correct position */ 1732 list_for_each_entry(next, 1733 &report->field_entry_list, 1734 list) { 1735 /* 1736 * the priority of our element is strictly higher 1737 * than the next one, insert it before 1738 */ 1739 if (entry->priority > next->priority) { 1740 list_add_tail(&entry->list, &next->list); 1741 return; 1742 } 1743 } 1744 1745 /* lowest priority score: insert at the end */ 1746 list_add_tail(&entry->list, &report->field_entry_list); 1747 } 1748 1749 static void hid_report_process_ordering(struct hid_device *hid, 1750 struct hid_report *report) 1751 { 1752 struct hid_field *field; 1753 struct hid_field_entry *entries; 1754 unsigned int a, u, usages; 1755 unsigned int count = 0; 1756 1757 /* count the number of individual fields in the report */ 1758 for (a = 0; a < report->maxfield; a++) { 1759 field = report->field[a]; 1760 1761 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1762 count += field->report_count; 1763 else 1764 count++; 1765 } 1766 1767 /* allocate the memory to process the fields */ 1768 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); 1769 if (!entries) 1770 return; 1771 1772 report->field_entries = entries; 1773 1774 /* 1775 * walk through all fields in the report and 1776 * store them by priority order in report->field_entry_list 1777 * 1778 * - Var elements are individualized (field + usage_index) 1779 * - Arrays are taken as one, we can not chose an order for them 1780 */ 1781 usages = 0; 1782 for (a = 0; a < report->maxfield; 
a++) { 1783 field = report->field[a]; 1784 1785 if (field->flags & HID_MAIN_ITEM_VARIABLE) { 1786 for (u = 0; u < field->report_count; u++) { 1787 __hid_insert_field_entry(hid, report, 1788 &entries[usages], 1789 field, u); 1790 usages++; 1791 } 1792 } else { 1793 __hid_insert_field_entry(hid, report, &entries[usages], 1794 field, 0); 1795 usages++; 1796 } 1797 } 1798 } 1799 1800 static void hid_process_ordering(struct hid_device *hid) 1801 { 1802 struct hid_report *report; 1803 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT]; 1804 1805 list_for_each_entry(report, &report_enum->report_list, list) 1806 hid_report_process_ordering(hid, report); 1807 } 1808 1809 /* 1810 * Output the field into the report. 1811 */ 1812 1813 static void hid_output_field(const struct hid_device *hid, 1814 struct hid_field *field, __u8 *data) 1815 { 1816 unsigned count = field->report_count; 1817 unsigned offset = field->report_offset; 1818 unsigned size = field->report_size; 1819 unsigned n; 1820 1821 for (n = 0; n < count; n++) { 1822 if (field->logical_minimum < 0) /* signed values */ 1823 implement(hid, data, offset + n * size, size, 1824 s32ton(field->value[n], size)); 1825 else /* unsigned values */ 1826 implement(hid, data, offset + n * size, size, 1827 field->value[n]); 1828 } 1829 } 1830 1831 /* 1832 * Compute the size of a report. 1833 */ 1834 static size_t hid_compute_report_size(struct hid_report *report) 1835 { 1836 if (report->size) 1837 return ((report->size - 1) >> 3) + 1; 1838 1839 return 0; 1840 } 1841 1842 /* 1843 * Create a report. 'data' has to be allocated using 1844 * hid_alloc_report_buf() so that it has proper size. 1845 */ 1846 1847 void hid_output_report(struct hid_report *report, __u8 *data) 1848 { 1849 unsigned n; 1850 1851 if (report->id > 0) 1852 *data++ = report->id; 1853 1854 memset(data, 0, hid_compute_report_size(report)); 1855 for (n = 0; n < report->maxfield; n++) 1856 hid_output_field(report->device, report->field[n], data); 1857 } 1858 EXPORT_SYMBOL_GPL(hid_output_report); 1859 1860 /* 1861 * Allocator for buffer that is going to be passed to hid_output_report() 1862 */ 1863 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) 1864 { 1865 /* 1866 * 7 extra bytes are necessary to achieve proper functionality 1867 * of implement() working on 8 byte chunks 1868 */ 1869 1870 u32 len = hid_report_len(report) + 7; 1871 1872 return kmalloc(len, flags); 1873 } 1874 EXPORT_SYMBOL_GPL(hid_alloc_report_buf); 1875 1876 /* 1877 * Set a field value. The report this field belongs to has to be 1878 * created and transferred to the device, to set this value in the 1879 * device. 
1880 */ 1881 1882 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) 1883 { 1884 unsigned size; 1885 1886 if (!field) 1887 return -1; 1888 1889 size = field->report_size; 1890 1891 hid_dump_input(field->report->device, field->usage + offset, value); 1892 1893 if (offset >= field->report_count) { 1894 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n", 1895 offset, field->report_count); 1896 return -1; 1897 } 1898 if (field->logical_minimum < 0) { 1899 if (value != snto32(s32ton(value, size), size)) { 1900 hid_err(field->report->device, "value %d is out of range\n", value); 1901 return -1; 1902 } 1903 } 1904 field->value[offset] = value; 1905 return 0; 1906 } 1907 EXPORT_SYMBOL_GPL(hid_set_field); 1908 1909 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, 1910 const u8 *data) 1911 { 1912 struct hid_report *report; 1913 unsigned int n = 0; /* Normally report number is 0 */ 1914 1915 /* Device uses numbered reports, data[0] is report number */ 1916 if (report_enum->numbered) 1917 n = *data; 1918 1919 report = report_enum->report_id_hash[n]; 1920 if (report == NULL) 1921 dbg_hid("undefined report_id %u received\n", n); 1922 1923 return report; 1924 } 1925 1926 /* 1927 * Implement a generic .request() callback, using .raw_request() 1928 * DO NOT USE in hid drivers directly, but through hid_hw_request instead. 1929 */ 1930 int __hid_request(struct hid_device *hid, struct hid_report *report, 1931 enum hid_class_request reqtype) 1932 { 1933 char *buf; 1934 int ret; 1935 u32 len; 1936 1937 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1938 if (!buf) 1939 return -ENOMEM; 1940 1941 len = hid_report_len(report); 1942 1943 if (reqtype == HID_REQ_SET_REPORT) 1944 hid_output_report(report, buf); 1945 1946 ret = hid->ll_driver->raw_request(hid, report->id, buf, len, 1947 report->type, reqtype); 1948 if (ret < 0) { 1949 dbg_hid("unable to complete request: %d\n", ret); 1950 goto out; 1951 } 1952 1953 if (reqtype == HID_REQ_GET_REPORT) 1954 hid_input_report(hid, report->type, buf, ret, 0); 1955 1956 ret = 0; 1957 1958 out: 1959 kfree(buf); 1960 return ret; 1961 } 1962 EXPORT_SYMBOL_GPL(__hid_request); 1963 1964 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 1965 int interrupt) 1966 { 1967 struct hid_report_enum *report_enum = hid->report_enum + type; 1968 struct hid_report *report; 1969 struct hid_driver *hdrv; 1970 int max_buffer_size = HID_MAX_BUFFER_SIZE; 1971 u32 rsize, csize = size; 1972 u8 *cdata = data; 1973 int ret = 0; 1974 1975 report = hid_get_report(report_enum, data); 1976 if (!report) 1977 goto out; 1978 1979 if (report_enum->numbered) { 1980 cdata++; 1981 csize--; 1982 } 1983 1984 rsize = hid_compute_report_size(report); 1985 1986 if (hid->ll_driver->max_buffer_size) 1987 max_buffer_size = hid->ll_driver->max_buffer_size; 1988 1989 if (report_enum->numbered && rsize >= max_buffer_size) 1990 rsize = max_buffer_size - 1; 1991 else if (rsize > max_buffer_size) 1992 rsize = max_buffer_size; 1993 1994 if (csize < rsize) { 1995 dbg_hid("report %d is too short, (%d < %d)\n", report->id, 1996 csize, rsize); 1997 memset(cdata + csize, 0, rsize - csize); 1998 } 1999 2000 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 2001 hid->hiddev_report_event(hid, report); 2002 if (hid->claimed & HID_CLAIMED_HIDRAW) { 2003 ret = hidraw_report_event(hid, data, size); 2004 if (ret) 2005 goto out; 2006 } 2007 2008 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 
int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
			 int interrupt)
{
	struct hid_report_enum *report_enum = hid->report_enum + type;
	struct hid_report *report;
	struct hid_driver *hdrv;
	int max_buffer_size = HID_MAX_BUFFER_SIZE;
	u32 rsize, csize = size;
	u8 *cdata = data;
	int ret = 0;

	report = hid_get_report(report_enum, data);
	if (!report)
		goto out;

	if (report_enum->numbered) {
		cdata++;
		csize--;
	}

	rsize = hid_compute_report_size(report);

	if (hid->ll_driver->max_buffer_size)
		max_buffer_size = hid->ll_driver->max_buffer_size;

	if (report_enum->numbered && rsize >= max_buffer_size)
		rsize = max_buffer_size - 1;
	else if (rsize > max_buffer_size)
		rsize = max_buffer_size;

	if (csize < rsize) {
		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
			csize, rsize);
		memset(cdata + csize, 0, rsize - csize);
	}

	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
		hid->hiddev_report_event(hid, report);
	if (hid->claimed & HID_CLAIMED_HIDRAW) {
		ret = hidraw_report_event(hid, data, size);
		if (ret)
			goto out;
	}

	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
		hid_process_report(hid, report, cdata, interrupt);
		hdrv = hid->driver;
		if (hdrv && hdrv->report)
			hdrv->report(hid, report);
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_report_event(hid, report);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(hid_report_raw_event);

/**
 * hid_input_report - report data from lower layer (usb, bt...)
 *
 * @hid: hid device
 * @type: HID report type (HID_*_REPORT)
 * @data: report contents
 * @size: size of data parameter
 * @interrupt: distinguish between interrupt and control transfers
 *
 * This is the data entry point for lower layers.
 */
int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
		     int interrupt)
{
	struct hid_report_enum *report_enum;
	struct hid_driver *hdrv;
	struct hid_report *report;
	int ret = 0;

	if (!hid)
		return -ENODEV;

	if (down_trylock(&hid->driver_input_lock))
		return -EBUSY;

	if (!hid->driver) {
		ret = -ENODEV;
		goto unlock;
	}
	report_enum = hid->report_enum + type;
	hdrv = hid->driver;

	data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto unlock;
	}

	if (!size) {
		dbg_hid("empty report\n");
		ret = -1;
		goto unlock;
	}

	/* Avoid unnecessary overhead if debugfs is disabled */
	if (!list_empty(&hid->debug_list))
		hid_dump_report(hid, type, data, size);

	report = hid_get_report(report_enum, data);

	if (!report) {
		ret = -1;
		goto unlock;
	}

	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
		ret = hdrv->raw_event(hid, report, data, size);
		if (ret < 0)
			goto unlock;
	}

	ret = hid_report_raw_event(hid, type, data, size, interrupt);

unlock:
	up(&hid->driver_input_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_input_report);
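
/*
 * Example (illustrative sketch, not part of this driver): a transport driver
 * feeds received reports into hid_input_report() above, typically from its
 * interrupt/completion handler.  The final argument (1) marks the data as
 * coming from an interrupt rather than a control transfer.  The hypothetical
 * transport type and its fields (example_transport, ->hid, ->inbuf, ->inlen)
 * are made up for illustration only.
 *
 *	static void example_transport_irq(struct example_transport *xport)
 *	{
 *		hid_input_report(xport->hid, HID_INPUT_REPORT,
 *				 xport->inbuf, xport->inlen, 1);
 *	}
 */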
bool hid_match_one_id(const struct hid_device *hdev,
		      const struct hid_device_id *id)
{
	return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
		(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
		(id->product == HID_ANY_ID || id->product == hdev->product);
}

const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
					 const struct hid_device_id *id)
{
	for (; id->bus; id++)
		if (hid_match_one_id(hdev, id))
			return id;

	return NULL;
}
EXPORT_SYMBOL_GPL(hid_match_id);

static const struct hid_device_id hid_hiddev_list[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
	{ }
};

static bool hid_hiddev(struct hid_device *hdev)
{
	return !!hid_match_id(hdev, hid_hiddev_list);
}


static ssize_t
read_report_descriptor(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *attr,
		       char *buf, loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct hid_device *hdev = to_hid_device(dev);

	if (off >= hdev->rsize)
		return 0;

	if (off + count > hdev->rsize)
		count = hdev->rsize - off;

	memcpy(buf, hdev->rdesc + off, count);

	return count;
}

static ssize_t
show_country(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct hid_device *hdev = to_hid_device(dev);

	return sprintf(buf, "%02x\n", hdev->country & 0xff);
}

static struct bin_attribute dev_bin_attr_report_desc = {
	.attr = { .name = "report_descriptor", .mode = 0444 },
	.read = read_report_descriptor,
	.size = HID_MAX_DESCRIPTOR_SIZE,
};

static const struct device_attribute dev_attr_country = {
	.attr = { .name = "country", .mode = 0444 },
	.show = show_country,
};
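
/*
 * Example (illustrative sketch, not part of this driver): a HID driver can
 * expose its own per-device sysfs attribute in the same way the core exposes
 * "country" above, usually creating it from probe() and removing it again in
 * remove().  The attribute name ("example_version") and the use of
 * hdev->version are for illustration only.
 *
 *	static ssize_t example_version_show(struct device *dev,
 *					    struct device_attribute *attr,
 *					    char *buf)
 *	{
 *		struct hid_device *hdev = to_hid_device(dev);
 *
 *		return sysfs_emit(buf, "%04x\n", hdev->version);
 *	}
 *	static DEVICE_ATTR_RO(example_version);
 *
 * probe():	device_create_file(&hdev->dev, &dev_attr_example_version);
 * remove():	device_remove_file(&hdev->dev, &dev_attr_example_version);
 */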
"," : "", 2222 ((struct hidraw *)hdev->hidraw)->minor); 2223 2224 type = "Device"; 2225 for (i = 0; i < hdev->maxcollection; i++) { 2226 struct hid_collection *col = &hdev->collection[i]; 2227 if (col->type == HID_COLLECTION_APPLICATION && 2228 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK && 2229 (col->usage & 0xffff) < ARRAY_SIZE(types)) { 2230 type = types[col->usage & 0xffff]; 2231 break; 2232 } 2233 } 2234 2235 switch (hdev->bus) { 2236 case BUS_USB: 2237 bus = "USB"; 2238 break; 2239 case BUS_BLUETOOTH: 2240 bus = "BLUETOOTH"; 2241 break; 2242 case BUS_I2C: 2243 bus = "I2C"; 2244 break; 2245 case BUS_VIRTUAL: 2246 bus = "VIRTUAL"; 2247 break; 2248 case BUS_INTEL_ISHTP: 2249 case BUS_AMD_SFH: 2250 bus = "SENSOR HUB"; 2251 break; 2252 default: 2253 bus = "<UNKNOWN>"; 2254 } 2255 2256 ret = device_create_file(&hdev->dev, &dev_attr_country); 2257 if (ret) 2258 hid_warn(hdev, 2259 "can't create sysfs country code attribute err: %d\n", ret); 2260 2261 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", 2262 buf, bus, hdev->version >> 8, hdev->version & 0xff, 2263 type, hdev->name, hdev->phys); 2264 2265 return 0; 2266 } 2267 EXPORT_SYMBOL_GPL(hid_connect); 2268 2269 void hid_disconnect(struct hid_device *hdev) 2270 { 2271 device_remove_file(&hdev->dev, &dev_attr_country); 2272 if (hdev->claimed & HID_CLAIMED_INPUT) 2273 hidinput_disconnect(hdev); 2274 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2275 hdev->hiddev_disconnect(hdev); 2276 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2277 hidraw_disconnect(hdev); 2278 hdev->claimed = 0; 2279 2280 hid_bpf_disconnect_device(hdev); 2281 } 2282 EXPORT_SYMBOL_GPL(hid_disconnect); 2283 2284 /** 2285 * hid_hw_start - start underlying HW 2286 * @hdev: hid device 2287 * @connect_mask: which outputs to connect, see HID_CONNECT_* 2288 * 2289 * Call this in probe function *after* hid_parse. This will setup HW 2290 * buffers and start the device (if not defeirred to device open). 2291 * hid_hw_stop must be called if this was successful. 2292 */ 2293 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask) 2294 { 2295 int error; 2296 2297 error = hdev->ll_driver->start(hdev); 2298 if (error) 2299 return error; 2300 2301 if (connect_mask) { 2302 error = hid_connect(hdev, connect_mask); 2303 if (error) { 2304 hdev->ll_driver->stop(hdev); 2305 return error; 2306 } 2307 } 2308 2309 return 0; 2310 } 2311 EXPORT_SYMBOL_GPL(hid_hw_start); 2312 2313 /** 2314 * hid_hw_stop - stop underlying HW 2315 * @hdev: hid device 2316 * 2317 * This is usually called from remove function or from probe when something 2318 * failed and hid_hw_start was called already. 2319 */ 2320 void hid_hw_stop(struct hid_device *hdev) 2321 { 2322 hid_disconnect(hdev); 2323 hdev->ll_driver->stop(hdev); 2324 } 2325 EXPORT_SYMBOL_GPL(hid_hw_stop); 2326 2327 /** 2328 * hid_hw_open - signal underlying HW to start delivering events 2329 * @hdev: hid device 2330 * 2331 * Tell underlying HW to start delivering events from the device. 2332 * This function should be called sometime after successful call 2333 * to hid_hw_start(). 
/**
 * hid_hw_open - signal underlying HW to start delivering events
 * @hdev: hid device
 *
 * Tell underlying HW to start delivering events from the device.
 * This function should be called sometime after successful call
 * to hid_hw_start().
 */
int hid_hw_open(struct hid_device *hdev)
{
	int ret;

	ret = mutex_lock_killable(&hdev->ll_open_lock);
	if (ret)
		return ret;

	if (!hdev->ll_open_count++) {
		ret = hdev->ll_driver->open(hdev);
		if (ret)
			hdev->ll_open_count--;
	}

	mutex_unlock(&hdev->ll_open_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_hw_open);

/**
 * hid_hw_close - signal underlying HW to stop delivering events
 *
 * @hdev: hid device
 *
 * This function indicates that we are not interested in the events
 * from this device anymore. Delivery of events may or may not stop,
 * depending on the number of users still outstanding.
 */
void hid_hw_close(struct hid_device *hdev)
{
	mutex_lock(&hdev->ll_open_lock);
	if (!--hdev->ll_open_count)
		hdev->ll_driver->close(hdev);
	mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);

/**
 * hid_hw_request - send report request to device
 *
 * @hdev: hid device
 * @report: report to send
 * @reqtype: hid request type
 */
void hid_hw_request(struct hid_device *hdev,
		    struct hid_report *report, enum hid_class_request reqtype)
{
	if (hdev->ll_driver->request)
		return hdev->ll_driver->request(hdev, report, reqtype);

	__hid_request(hdev, report, reqtype);
}
EXPORT_SYMBOL_GPL(hid_hw_request);
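
/*
 * Example (illustrative sketch, not part of this driver): hid_hw_open() and
 * hid_hw_close() are reference counted, so a driver that needs the event
 * stream outside of the input open/close path (for instance while talking to
 * the device during probe) brackets that window explicitly.  The helper
 * names (example_query_device, example_do_handshake) are hypothetical.
 *
 *	static int example_query_device(struct hid_device *hdev)
 *	{
 *		int ret;
 *
 *		ret = hid_hw_open(hdev);
 *		if (ret)
 *			return ret;
 *
 *		ret = example_do_handshake(hdev);
 *
 *		hid_hw_close(hdev);
 *		return ret;
 *	}
 */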
/**
 * hid_hw_raw_request - send report request to device
 *
 * @hdev: hid device
 * @reportnum: report ID
 * @buf: in/out data to transfer
 * @len: length of buf
 * @rtype: HID report type
 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
 *
 * Return: count of data transferred, negative if error
 *
 * Same behavior as hid_hw_request, but with raw buffers instead.
 */
int hid_hw_raw_request(struct hid_device *hdev,
		       unsigned char reportnum, __u8 *buf,
		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;

	if (hdev->ll_driver->max_buffer_size)
		max_buffer_size = hdev->ll_driver->max_buffer_size;

	if (len < 1 || len > max_buffer_size || !buf)
		return -EINVAL;

	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
					    rtype, reqtype);
}
EXPORT_SYMBOL_GPL(hid_hw_raw_request);

/**
 * hid_hw_output_report - send output report to device
 *
 * @hdev: hid device
 * @buf: raw data to transfer
 * @len: length of buf
 *
 * Return: count of data transferred, negative if error
 */
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
{
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;

	if (hdev->ll_driver->max_buffer_size)
		max_buffer_size = hdev->ll_driver->max_buffer_size;

	if (len < 1 || len > max_buffer_size || !buf)
		return -EINVAL;

	if (hdev->ll_driver->output_report)
		return hdev->ll_driver->output_report(hdev, buf, len);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(hid_hw_output_report);

#ifdef CONFIG_PM
int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
{
	if (hdev->driver && hdev->driver->suspend)
		return hdev->driver->suspend(hdev, state);

	return 0;
}
EXPORT_SYMBOL_GPL(hid_driver_suspend);

int hid_driver_reset_resume(struct hid_device *hdev)
{
	if (hdev->driver && hdev->driver->reset_resume)
		return hdev->driver->reset_resume(hdev);

	return 0;
}
EXPORT_SYMBOL_GPL(hid_driver_reset_resume);

int hid_driver_resume(struct hid_device *hdev)
{
	if (hdev->driver && hdev->driver->resume)
		return hdev->driver->resume(hdev);

	return 0;
}
EXPORT_SYMBOL_GPL(hid_driver_resume);
#endif /* CONFIG_PM */
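
/*
 * Example (illustrative sketch, not part of this driver): reading a feature
 * report with hid_hw_raw_request().  The buffer should be heap allocated
 * (kmalloc), not on the stack, since some transports hand it to DMA.  The
 * report ID (0x05), buffer length and helper name are hypothetical.
 *
 *	static int example_read_feature(struct hid_device *hdev)
 *	{
 *		u8 *buf;
 *		int ret;
 *
 *		buf = kzalloc(16, GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		buf[0] = 0x05;
 *		ret = hid_hw_raw_request(hdev, 0x05, buf, 16,
 *					 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *		if (ret < 0)
 *			hid_err(hdev, "feature request failed: %d\n", ret);
 *
 *		kfree(buf);
 *		return ret < 0 ? ret : 0;
 *	}
 */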
struct hid_dynid {
	struct list_head list;
	struct hid_device_id id;
};

/**
 * new_id_store - add a new HID device ID to this driver and re-probe devices
 * @drv: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Adds a new dynamic hid device ID to this driver,
 * and causes the driver to probe for all devices again.
 */
static ssize_t new_id_store(struct device_driver *drv, const char *buf,
			    size_t count)
{
	struct hid_driver *hdrv = to_hid_driver(drv);
	struct hid_dynid *dynid;
	__u32 bus, vendor, product;
	unsigned long driver_data = 0;
	int ret;

	ret = sscanf(buf, "%x %x %x %lx",
		     &bus, &vendor, &product, &driver_data);
	if (ret < 3)
		return -EINVAL;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.bus = bus;
	dynid->id.group = HID_GROUP_ANY;
	dynid->id.vendor = vendor;
	dynid->id.product = product;
	dynid->id.driver_data = driver_data;

	spin_lock(&hdrv->dyn_lock);
	list_add_tail(&dynid->list, &hdrv->dyn_list);
	spin_unlock(&hdrv->dyn_lock);

	ret = driver_attach(&hdrv->driver);

	return ret ? : count;
}
static DRIVER_ATTR_WO(new_id);

static struct attribute *hid_drv_attrs[] = {
	&driver_attr_new_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hid_drv);

static void hid_free_dynids(struct hid_driver *hdrv)
{
	struct hid_dynid *dynid, *n;

	spin_lock(&hdrv->dyn_lock);
	list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
		list_del(&dynid->list);
		kfree(dynid);
	}
	spin_unlock(&hdrv->dyn_lock);
}

const struct hid_device_id *hid_match_device(struct hid_device *hdev,
					     struct hid_driver *hdrv)
{
	struct hid_dynid *dynid;

	spin_lock(&hdrv->dyn_lock);
	list_for_each_entry(dynid, &hdrv->dyn_list, list) {
		if (hid_match_one_id(hdev, &dynid->id)) {
			spin_unlock(&hdrv->dyn_lock);
			return &dynid->id;
		}
	}
	spin_unlock(&hdrv->dyn_lock);

	return hid_match_id(hdev, hdrv->id_table);
}
EXPORT_SYMBOL_GPL(hid_match_device);

static int hid_bus_match(struct device *dev, struct device_driver *drv)
{
	struct hid_driver *hdrv = to_hid_driver(drv);
	struct hid_device *hdev = to_hid_device(dev);

	return hid_match_device(hdev, hdrv) != NULL;
}

/**
 * hid_compare_device_paths - check if both devices share the same path
 * @hdev_a: hid device
 * @hdev_b: hid device
 * @separator: char to use as separator
 *
 * Check if two devices share the same path up to the last occurrence of
 * the separator char. Both paths must exist (i.e., zero-length paths
 * don't match).
 */
bool hid_compare_device_paths(struct hid_device *hdev_a,
			      struct hid_device *hdev_b, char separator)
{
	int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
	int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;

	if (n1 != n2 || n1 <= 0 || n2 <= 0)
		return false;

	return !strncmp(hdev_a->phys, hdev_b->phys, n1);
}
EXPORT_SYMBOL_GPL(hid_compare_device_paths);
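
/*
 * Example (illustrative sketch, not part of this driver): the static side of
 * the matching done by hid_match_device() above is the driver's id_table.
 * HID_ANY_ID and HID_GROUP_ANY act as wildcards in hid_match_one_id().  The
 * vendor/product values below are placeholders.
 *
 *	static const struct hid_device_id example_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_ANY, 0x1234, HID_ANY_ID) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, example_devices);
 */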
static bool hid_check_device_match(struct hid_device *hdev,
				   struct hid_driver *hdrv,
				   const struct hid_device_id **id)
{
	*id = hid_match_device(hdev, hdrv);
	if (!*id)
		return false;

	if (hdrv->match)
		return hdrv->match(hdev, hid_ignore_special_drivers);

	/*
	 * hid-generic implements .match(), so we must be dealing with a
	 * different HID driver here, and can simply check if
	 * hid_ignore_special_drivers is set or not.
	 */
	return !hid_ignore_special_drivers;
}

static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
{
	const struct hid_device_id *id;
	int ret;

	if (!hid_check_device_match(hdev, hdrv, &id))
		return -ENODEV;

	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
	if (!hdev->devres_group_id)
		return -ENOMEM;

	/* reset the quirks that have been previously set */
	hdev->quirks = hid_lookup_quirk(hdev);
	hdev->driver = hdrv;

	if (hdrv->probe) {
		ret = hdrv->probe(hdev, id);
	} else { /* default probe */
		ret = hid_open_report(hdev);
		if (!ret)
			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	}

	/*
	 * Note that we are not closing the devres group opened above so
	 * even resources that were attached to the device after probe is
	 * run are released when hid_device_remove() is executed. This is
	 * needed as some drivers would allocate additional resources,
	 * for example when updating firmware.
	 */

	if (ret) {
		devres_release_group(&hdev->dev, hdev->devres_group_id);
		hid_close_report(hdev);
		hdev->driver = NULL;
	}

	return ret;
}

static int hid_device_probe(struct device *dev)
{
	struct hid_device *hdev = to_hid_device(dev);
	struct hid_driver *hdrv = to_hid_driver(dev->driver);
	int ret = 0;

	if (down_interruptible(&hdev->driver_input_lock))
		return -EINTR;

	hdev->io_started = false;
	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);

	if (!hdev->driver)
		ret = __hid_device_probe(hdev, hdrv);

	if (!hdev->io_started)
		up(&hdev->driver_input_lock);

	return ret;
}

static void hid_device_remove(struct device *dev)
{
	struct hid_device *hdev = to_hid_device(dev);
	struct hid_driver *hdrv;

	down(&hdev->driver_input_lock);
	hdev->io_started = false;

	hdrv = hdev->driver;
	if (hdrv) {
		if (hdrv->remove)
			hdrv->remove(hdev);
		else /* default remove */
			hid_hw_stop(hdev);

		/* Release all devres resources allocated by the driver */
		devres_release_group(&hdev->dev, hdev->devres_group_id);

		hid_close_report(hdev);
		hdev->driver = NULL;
	}

	if (!hdev->io_started)
		up(&hdev->driver_input_lock);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);

	return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *hid_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
static struct bin_attribute *hid_dev_bin_attrs[] = {
	&dev_bin_attr_report_desc,
	NULL
};
static const struct attribute_group hid_dev_group = {
	.attrs = hid_dev_attrs,
	.bin_attrs = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);

static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct hid_device *hdev = to_hid_device(dev);

	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
			   hdev->bus, hdev->vendor, hdev->product))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
		return -ENOMEM;

	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
		return -ENOMEM;

	return 0;
}

struct bus_type hid_bus_type = {
	.name = "hid",
	.dev_groups = hid_dev_groups,
	.drv_groups = hid_drv_groups,
	.match = hid_bus_match,
	.probe = hid_device_probe,
	.remove = hid_device_remove,
	.uevent = hid_uevent,
};
EXPORT_SYMBOL(hid_bus_type);
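
/*
 * Example (worked, from the MODALIAS format string above): a USB (bus
 * 0x0003), HID_GROUP_GENERIC (0x0001) device with vendor 0x046D and product
 * 0xC52B reports
 *
 *	MODALIAS=hid:b0003g0001v0000046Dp0000C52B
 *
 * which userspace (modprobe) matches against the aliases generated from each
 * driver's MODULE_DEVICE_TABLE(hid, ...) entries.
 */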
int hid_add_device(struct hid_device *hdev)
{
	static atomic_t id = ATOMIC_INIT(0);
	int ret;

	if (WARN_ON(hdev->status & HID_STAT_ADDED))
		return -EBUSY;

	hdev->quirks = hid_lookup_quirk(hdev);

	/* we need to kill ignored devices here, otherwise they will stay
	 * allocated waiting for a driver that is never coming */
	if (hid_ignore(hdev))
		return -ENODEV;

	/*
	 * Check for the mandatory transport channel.
	 */
	if (!hdev->ll_driver->raw_request) {
		hid_err(hdev, "transport driver missing .raw_request()\n");
		return -EINVAL;
	}

	/*
	 * Read the device report descriptor once and use as template
	 * for the driver-specific modifications.
	 */
	ret = hdev->ll_driver->parse(hdev);
	if (ret)
		return ret;
	if (!hdev->dev_rdesc)
		return -ENODEV;

	/*
	 * Scan generic devices for group information
	 */
	if (hid_ignore_special_drivers) {
		hdev->group = HID_GROUP_GENERIC;
	} else if (!hdev->group &&
		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
		ret = hid_scan_report(hdev);
		if (ret)
			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
	}

	hdev->id = atomic_inc_return(&id);

	/* XXX hack, any other cleaner solution after the driver core
	 * is converted to allow more than 20 bytes as the device name? */
	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
		     hdev->vendor, hdev->product, hdev->id);

	hid_debug_register(hdev, dev_name(&hdev->dev));
	ret = device_add(&hdev->dev);
	if (!ret)
		hdev->status |= HID_STAT_ADDED;
	else
		hid_debug_unregister(hdev);

	return ret;
}
EXPORT_SYMBOL_GPL(hid_add_device);

/**
 * hid_allocate_device - allocate new hid device descriptor
 *
 * Allocate and initialize hid device, so that hid_destroy_device might be
 * used to free it.
 *
 * New hid_device pointer is returned on success, otherwise ERR_PTR encoded
 * error value.
 */
struct hid_device *hid_allocate_device(void)
{
	struct hid_device *hdev;
	int ret = -ENOMEM;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return ERR_PTR(ret);

	device_initialize(&hdev->dev);
	hdev->dev.release = hid_device_release;
	hdev->dev.bus = &hid_bus_type;
	device_enable_async_suspend(&hdev->dev);

	hid_close_report(hdev);

	init_waitqueue_head(&hdev->debug_wait);
	INIT_LIST_HEAD(&hdev->debug_list);
	spin_lock_init(&hdev->debug_list_lock);
	sema_init(&hdev->driver_input_lock, 1);
	mutex_init(&hdev->ll_open_lock);

	hid_bpf_device_init(hdev);

	return hdev;
}
EXPORT_SYMBOL_GPL(hid_allocate_device);
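
/*
 * Example (illustrative sketch, not part of this driver): a transport (lower
 * layer) driver creates a hid_device with hid_allocate_device(), fills in
 * the bus/ID/name fields and its struct hid_ll_driver, and registers it with
 * hid_add_device().  On failure it disposes of the device with
 * hid_destroy_device() (defined below).  example_ll_driver, the transport
 * type and the ID values are placeholders.
 *
 *	static int example_transport_create(struct example_transport *xport)
 *	{
 *		struct hid_device *hid;
 *		int ret;
 *
 *		hid = hid_allocate_device();
 *		if (IS_ERR(hid))
 *			return PTR_ERR(hid);
 *
 *		hid->ll_driver = &example_ll_driver;
 *		hid->bus = BUS_VIRTUAL;
 *		hid->vendor = 0x1234;
 *		hid->product = 0x5678;
 *		snprintf(hid->name, sizeof(hid->name), "Example HID Device");
 *		xport->hid = hid;
 *
 *		ret = hid_add_device(hid);
 *		if (ret)
 *			hid_destroy_device(hid);
 *		return ret;
 *	}
 */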
static void hid_remove_device(struct hid_device *hdev)
{
	if (hdev->status & HID_STAT_ADDED) {
		device_del(&hdev->dev);
		hid_debug_unregister(hdev);
		hdev->status &= ~HID_STAT_ADDED;
	}
	kfree(hdev->dev_rdesc);
	hdev->dev_rdesc = NULL;
	hdev->dev_rsize = 0;
}

/**
 * hid_destroy_device - free previously allocated device
 *
 * @hdev: hid device
 *
 * If you allocate hid_device through hid_allocate_device, you should only
 * ever free it with this function.
 */
void hid_destroy_device(struct hid_device *hdev)
{
	hid_bpf_destroy_device(hdev);
	hid_remove_device(hdev);
	put_device(&hdev->dev);
}
EXPORT_SYMBOL_GPL(hid_destroy_device);


static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
{
	struct hid_driver *hdrv = data;
	struct hid_device *hdev = to_hid_device(dev);

	if (hdev->driver == hdrv &&
	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
		return device_reprobe(dev);

	return 0;
}

static int __hid_bus_driver_added(struct device_driver *drv, void *data)
{
	struct hid_driver *hdrv = to_hid_driver(drv);

	if (hdrv->match) {
		bus_for_each_dev(&hid_bus_type, NULL, hdrv,
				 __hid_bus_reprobe_drivers);
	}

	return 0;
}

static int __bus_removed_driver(struct device_driver *drv, void *data)
{
	return bus_rescan_devices(&hid_bus_type);
}

int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
		const char *mod_name)
{
	int ret;

	hdrv->driver.name = hdrv->name;
	hdrv->driver.bus = &hid_bus_type;
	hdrv->driver.owner = owner;
	hdrv->driver.mod_name = mod_name;

	INIT_LIST_HEAD(&hdrv->dyn_list);
	spin_lock_init(&hdrv->dyn_lock);

	ret = driver_register(&hdrv->driver);

	if (ret == 0)
		bus_for_each_drv(&hid_bus_type, NULL, NULL,
				 __hid_bus_driver_added);

	return ret;
}
EXPORT_SYMBOL_GPL(__hid_register_driver);

void hid_unregister_driver(struct hid_driver *hdrv)
{
	driver_unregister(&hdrv->driver);
	hid_free_dynids(hdrv);

	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);

int hid_check_keys_pressed(struct hid_device *hid)
{
	struct hid_input *hidinput;
	int i;

	if (!(hid->claimed & HID_CLAIMED_INPUT))
		return 0;

	list_for_each_entry(hidinput, &hid->inputs, list) {
		for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
			if (hidinput->input->key[i])
				return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);

#ifdef CONFIG_HID_BPF
static struct hid_bpf_ops hid_ops = {
	.hid_get_report = hid_get_report,
	.hid_hw_raw_request = hid_hw_raw_request,
	.owner = THIS_MODULE,
	.bus_type = &hid_bus_type,
};
#endif

static int __init hid_init(void)
{
	int ret;

	ret = bus_register(&hid_bus_type);
	if (ret) {
		pr_err("can't register hid bus\n");
		goto err;
	}

#ifdef CONFIG_HID_BPF
	hid_bpf_ops = &hid_ops;
#endif

	ret = hidraw_init();
	if (ret)
		goto err_bus;

	hid_debug_init();

	return 0;
err_bus:
	bus_unregister(&hid_bus_type);
err:
	return ret;
}

static void __exit hid_exit(void)
{
#ifdef CONFIG_HID_BPF
	hid_bpf_ops = NULL;
#endif
	hid_debug_exit();
	hidraw_exit();
	bus_unregister(&hid_bus_type);
	hid_quirks_exit(HID_BUS_ANY);
}

module_init(hid_init);
module_exit(hid_exit);

MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_LICENSE("GPL");