// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID support for Linux
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2012 Jiri Kosina
 */

/*
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/unaligned.h>
#include <asm/byteorder.h>
#include <linux/input.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>

#include <linux/hid.h>
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>

#include "hid-ids.h"

/*
 * Version Information
 */

#define DRIVER_DESC "HID core driver"

static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");

/*
 * Convert a signed n-bit integer to signed 32-bit integer.
 */

static s32 snto32(__u32 value, unsigned int n)
{
	if (!value || !n)
		return 0;

	if (n > 32)
		n = 32;

	return sign_extend32(value, n - 1);
}

/*
 * Convert a signed 32-bit integer to a signed n-bit integer.
 */

static u32 s32ton(__s32 value, unsigned int n)
{
	s32 a;

	if (!value || !n)
		return 0;

	a = value >> (n - 1);
	if (a && a != -1)
		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
	return value & ((1 << n) - 1);
}

/*
 * Register a new report for a device.
 */

struct hid_report *hid_register_report(struct hid_device *device,
				       enum hid_report_type type, unsigned int id,
				       unsigned int application)
{
	struct hid_report_enum *report_enum = device->report_enum + type;
	struct hid_report *report;

	if (id >= HID_MAX_IDS)
		return NULL;
	if (report_enum->report_id_hash[id])
		return report_enum->report_id_hash[id];

	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
	if (!report)
		return NULL;

	if (id != 0)
		report_enum->numbered = 1;

	report->id = id;
	report->type = type;
	report->size = 0;
	report->device = device;
	report->application = application;
	report_enum->report_id_hash[id] = report;

	list_add_tail(&report->list, &report_enum->report_list);
	INIT_LIST_HEAD(&report->field_entry_list);

	return report;
}
EXPORT_SYMBOL_GPL(hid_register_report);

/*
 * Register a new field for this report.
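 *
 * The field structure, its usage array and the three per-usage value
 * tables (value, new_value, usages_priorities) are carved out of one
 * kvzalloc() block, laid out roughly as
 *
 *   [struct hid_field][usages x struct hid_usage][3 x usages x s32]
 *
 * so kvfree()ing the field in hid_free_report() releases all of them.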
119 */ 120 121 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) 122 { 123 struct hid_field *field; 124 125 if (report->maxfield == HID_MAX_FIELDS) { 126 hid_err(report->device, "too many fields in report\n"); 127 return NULL; 128 } 129 130 field = kvzalloc((sizeof(struct hid_field) + 131 usages * sizeof(struct hid_usage) + 132 3 * usages * sizeof(unsigned int)), GFP_KERNEL); 133 if (!field) 134 return NULL; 135 136 field->index = report->maxfield++; 137 report->field[field->index] = field; 138 field->usage = (struct hid_usage *)(field + 1); 139 field->value = (s32 *)(field->usage + usages); 140 field->new_value = (s32 *)(field->value + usages); 141 field->usages_priorities = (s32 *)(field->new_value + usages); 142 field->report = report; 143 144 return field; 145 } 146 147 /* 148 * Open a collection. The type/usage is pushed on the stack. 149 */ 150 151 static int open_collection(struct hid_parser *parser, unsigned type) 152 { 153 struct hid_collection *collection; 154 unsigned usage; 155 int collection_index; 156 157 usage = parser->local.usage[0]; 158 159 if (parser->collection_stack_ptr == parser->collection_stack_size) { 160 unsigned int *collection_stack; 161 unsigned int new_size = parser->collection_stack_size + 162 HID_COLLECTION_STACK_SIZE; 163 164 collection_stack = krealloc(parser->collection_stack, 165 new_size * sizeof(unsigned int), 166 GFP_KERNEL); 167 if (!collection_stack) 168 return -ENOMEM; 169 170 parser->collection_stack = collection_stack; 171 parser->collection_stack_size = new_size; 172 } 173 174 if (parser->device->maxcollection == parser->device->collection_size) { 175 collection = kmalloc( 176 array3_size(sizeof(struct hid_collection), 177 parser->device->collection_size, 178 2), 179 GFP_KERNEL); 180 if (collection == NULL) { 181 hid_err(parser->device, "failed to reallocate collection array\n"); 182 return -ENOMEM; 183 } 184 memcpy(collection, parser->device->collection, 185 sizeof(struct hid_collection) * 186 parser->device->collection_size); 187 memset(collection + parser->device->collection_size, 0, 188 sizeof(struct hid_collection) * 189 parser->device->collection_size); 190 kfree(parser->device->collection); 191 parser->device->collection = collection; 192 parser->device->collection_size *= 2; 193 } 194 195 parser->collection_stack[parser->collection_stack_ptr++] = 196 parser->device->maxcollection; 197 198 collection_index = parser->device->maxcollection++; 199 collection = parser->device->collection + collection_index; 200 collection->type = type; 201 collection->usage = usage; 202 collection->level = parser->collection_stack_ptr - 1; 203 collection->parent_idx = (collection->level == 0) ? -1 : 204 parser->collection_stack[collection->level - 1]; 205 206 if (type == HID_COLLECTION_APPLICATION) 207 parser->device->maxapplication++; 208 209 return 0; 210 } 211 212 /* 213 * Close a collection. 214 */ 215 216 static int close_collection(struct hid_parser *parser) 217 { 218 if (!parser->collection_stack_ptr) { 219 hid_err(parser->device, "collection stack underflow\n"); 220 return -EINVAL; 221 } 222 parser->collection_stack_ptr--; 223 return 0; 224 } 225 226 /* 227 * Climb up the stack, search for the specified collection type 228 * and return the usage. 
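 *
 * For example, with a Generic Desktop / Mouse application collection
 * containing a Pointer physical collection, looking up
 * HID_COLLECTION_APPLICATION from inside the physical collection returns
 * the Mouse usage, while HID_COLLECTION_PHYSICAL returns Pointer.
 * 0 is returned when no enclosing collection matches.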
229 */ 230 231 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) 232 { 233 struct hid_collection *collection = parser->device->collection; 234 int n; 235 236 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) { 237 unsigned index = parser->collection_stack[n]; 238 if (collection[index].type == type) 239 return collection[index].usage; 240 } 241 return 0; /* we know nothing about this usage type */ 242 } 243 244 /* 245 * Concatenate usage which defines 16 bits or less with the 246 * currently defined usage page to form a 32 bit usage 247 */ 248 249 static void complete_usage(struct hid_parser *parser, unsigned int index) 250 { 251 parser->local.usage[index] &= 0xFFFF; 252 parser->local.usage[index] |= 253 (parser->global.usage_page & 0xFFFF) << 16; 254 } 255 256 /* 257 * Add a usage to the temporary parser table. 258 */ 259 260 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) 261 { 262 if (parser->local.usage_index >= HID_MAX_USAGES) { 263 hid_err(parser->device, "usage index exceeded\n"); 264 return -1; 265 } 266 parser->local.usage[parser->local.usage_index] = usage; 267 268 /* 269 * If Usage item only includes usage id, concatenate it with 270 * currently defined usage page 271 */ 272 if (size <= 2) 273 complete_usage(parser, parser->local.usage_index); 274 275 parser->local.usage_size[parser->local.usage_index] = size; 276 parser->local.collection_index[parser->local.usage_index] = 277 parser->collection_stack_ptr ? 278 parser->collection_stack[parser->collection_stack_ptr - 1] : 0; 279 parser->local.usage_index++; 280 return 0; 281 } 282 283 /* 284 * Register a new field for this report. 285 */ 286 287 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags) 288 { 289 struct hid_report *report; 290 struct hid_field *field; 291 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; 292 unsigned int usages; 293 unsigned int offset; 294 unsigned int i; 295 unsigned int application; 296 297 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 298 299 report = hid_register_report(parser->device, report_type, 300 parser->global.report_id, application); 301 if (!report) { 302 hid_err(parser->device, "hid_register_report failed\n"); 303 return -1; 304 } 305 306 /* Handle both signed and unsigned cases properly */ 307 if ((parser->global.logical_minimum < 0 && 308 parser->global.logical_maximum < 309 parser->global.logical_minimum) || 310 (parser->global.logical_minimum >= 0 && 311 (__u32)parser->global.logical_maximum < 312 (__u32)parser->global.logical_minimum)) { 313 dbg_hid("logical range invalid 0x%x 0x%x\n", 314 parser->global.logical_minimum, 315 parser->global.logical_maximum); 316 return -1; 317 } 318 319 offset = report->size; 320 report->size += parser->global.report_size * parser->global.report_count; 321 322 if (parser->device->ll_driver->max_buffer_size) 323 max_buffer_size = parser->device->ll_driver->max_buffer_size; 324 325 /* Total size check: Allow for possible report index byte */ 326 if (report->size > (max_buffer_size - 1) << 3) { 327 hid_err(parser->device, "report is too long\n"); 328 return -1; 329 } 330 331 if (!parser->local.usage_index) /* Ignore padding fields */ 332 return 0; 333 334 usages = max_t(unsigned, parser->local.usage_index, 335 parser->global.report_count); 336 337 field = hid_register_field(report, usages); 338 if (!field) 339 return 0; 340 341 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL); 342 field->logical = 
hid_lookup_collection(parser, HID_COLLECTION_LOGICAL); 343 field->application = application; 344 345 for (i = 0; i < usages; i++) { 346 unsigned j = i; 347 /* Duplicate the last usage we parsed if we have excess values */ 348 if (i >= parser->local.usage_index) 349 j = parser->local.usage_index - 1; 350 field->usage[i].hid = parser->local.usage[j]; 351 field->usage[i].collection_index = 352 parser->local.collection_index[j]; 353 field->usage[i].usage_index = i; 354 field->usage[i].resolution_multiplier = 1; 355 } 356 357 field->maxusage = usages; 358 field->flags = flags; 359 field->report_offset = offset; 360 field->report_type = report_type; 361 field->report_size = parser->global.report_size; 362 field->report_count = parser->global.report_count; 363 field->logical_minimum = parser->global.logical_minimum; 364 field->logical_maximum = parser->global.logical_maximum; 365 field->physical_minimum = parser->global.physical_minimum; 366 field->physical_maximum = parser->global.physical_maximum; 367 field->unit_exponent = parser->global.unit_exponent; 368 field->unit = parser->global.unit; 369 370 return 0; 371 } 372 373 /* 374 * Read data value from item. 375 */ 376 377 static u32 item_udata(struct hid_item *item) 378 { 379 switch (item->size) { 380 case 1: return item->data.u8; 381 case 2: return item->data.u16; 382 case 4: return item->data.u32; 383 } 384 return 0; 385 } 386 387 static s32 item_sdata(struct hid_item *item) 388 { 389 switch (item->size) { 390 case 1: return item->data.s8; 391 case 2: return item->data.s16; 392 case 4: return item->data.s32; 393 } 394 return 0; 395 } 396 397 /* 398 * Process a global item. 399 */ 400 401 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 402 { 403 __s32 raw_value; 404 switch (item->tag) { 405 case HID_GLOBAL_ITEM_TAG_PUSH: 406 407 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { 408 hid_err(parser->device, "global environment stack overflow\n"); 409 return -1; 410 } 411 412 memcpy(parser->global_stack + parser->global_stack_ptr++, 413 &parser->global, sizeof(struct hid_global)); 414 return 0; 415 416 case HID_GLOBAL_ITEM_TAG_POP: 417 418 if (!parser->global_stack_ptr) { 419 hid_err(parser->device, "global environment stack underflow\n"); 420 return -1; 421 } 422 423 memcpy(&parser->global, parser->global_stack + 424 --parser->global_stack_ptr, sizeof(struct hid_global)); 425 return 0; 426 427 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE: 428 parser->global.usage_page = item_udata(item); 429 return 0; 430 431 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM: 432 parser->global.logical_minimum = item_sdata(item); 433 return 0; 434 435 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM: 436 if (parser->global.logical_minimum < 0) 437 parser->global.logical_maximum = item_sdata(item); 438 else 439 parser->global.logical_maximum = item_udata(item); 440 return 0; 441 442 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM: 443 parser->global.physical_minimum = item_sdata(item); 444 return 0; 445 446 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM: 447 if (parser->global.physical_minimum < 0) 448 parser->global.physical_maximum = item_sdata(item); 449 else 450 parser->global.physical_maximum = item_udata(item); 451 return 0; 452 453 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 454 /* Many devices provide unit exponent as a two's complement 455 * nibble due to the common misunderstanding of HID 456 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle 457 * both this and the standard encoding. 
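 *
 * For example, a Unit Exponent item carrying 0x0C is taken as the
 * signed nibble -4, while a device that already sign-extends and sends
 * 0xFC (-4) has bits set outside the low nibble and is used as-is;
 * both end up as an exponent of -4.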
*/ 458 raw_value = item_sdata(item); 459 if (!(raw_value & 0xfffffff0)) 460 parser->global.unit_exponent = snto32(raw_value, 4); 461 else 462 parser->global.unit_exponent = raw_value; 463 return 0; 464 465 case HID_GLOBAL_ITEM_TAG_UNIT: 466 parser->global.unit = item_udata(item); 467 return 0; 468 469 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: 470 parser->global.report_size = item_udata(item); 471 if (parser->global.report_size > 256) { 472 hid_err(parser->device, "invalid report_size %d\n", 473 parser->global.report_size); 474 return -1; 475 } 476 return 0; 477 478 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT: 479 parser->global.report_count = item_udata(item); 480 if (parser->global.report_count > HID_MAX_USAGES) { 481 hid_err(parser->device, "invalid report_count %d\n", 482 parser->global.report_count); 483 return -1; 484 } 485 return 0; 486 487 case HID_GLOBAL_ITEM_TAG_REPORT_ID: 488 parser->global.report_id = item_udata(item); 489 if (parser->global.report_id == 0 || 490 parser->global.report_id >= HID_MAX_IDS) { 491 hid_err(parser->device, "report_id %u is invalid\n", 492 parser->global.report_id); 493 return -1; 494 } 495 return 0; 496 497 default: 498 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag); 499 return -1; 500 } 501 } 502 503 /* 504 * Process a local item. 505 */ 506 507 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) 508 { 509 __u32 data; 510 unsigned n; 511 __u32 count; 512 513 data = item_udata(item); 514 515 switch (item->tag) { 516 case HID_LOCAL_ITEM_TAG_DELIMITER: 517 518 if (data) { 519 /* 520 * We treat items before the first delimiter 521 * as global to all usage sets (branch 0). 522 * In the moment we process only these global 523 * items and the first delimiter set. 524 */ 525 if (parser->local.delimiter_depth != 0) { 526 hid_err(parser->device, "nested delimiters\n"); 527 return -1; 528 } 529 parser->local.delimiter_depth++; 530 parser->local.delimiter_branch++; 531 } else { 532 if (parser->local.delimiter_depth < 1) { 533 hid_err(parser->device, "bogus close delimiter\n"); 534 return -1; 535 } 536 parser->local.delimiter_depth--; 537 } 538 return 0; 539 540 case HID_LOCAL_ITEM_TAG_USAGE: 541 542 if (parser->local.delimiter_branch > 1) { 543 dbg_hid("alternative usage ignored\n"); 544 return 0; 545 } 546 547 return hid_add_usage(parser, data, item->size); 548 549 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: 550 551 if (parser->local.delimiter_branch > 1) { 552 dbg_hid("alternative usage ignored\n"); 553 return 0; 554 } 555 556 parser->local.usage_minimum = data; 557 return 0; 558 559 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM: 560 561 if (parser->local.delimiter_branch > 1) { 562 dbg_hid("alternative usage ignored\n"); 563 return 0; 564 } 565 566 count = data - parser->local.usage_minimum; 567 if (count + parser->local.usage_index >= HID_MAX_USAGES) { 568 /* 569 * We do not warn if the name is not set, we are 570 * actually pre-scanning the device. 
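 *
 * The maximum is clamped below so that the range
 * usage_minimum..data fills the usage table up to exactly
 * HID_MAX_USAGES entries; anything beyond that is dropped.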
571 */ 572 if (dev_name(&parser->device->dev)) 573 hid_warn(parser->device, 574 "ignoring exceeding usage max\n"); 575 data = HID_MAX_USAGES - parser->local.usage_index + 576 parser->local.usage_minimum - 1; 577 if (data <= 0) { 578 hid_err(parser->device, 579 "no more usage index available\n"); 580 return -1; 581 } 582 } 583 584 for (n = parser->local.usage_minimum; n <= data; n++) 585 if (hid_add_usage(parser, n, item->size)) { 586 dbg_hid("hid_add_usage failed\n"); 587 return -1; 588 } 589 return 0; 590 591 default: 592 593 dbg_hid("unknown local item tag 0x%x\n", item->tag); 594 return 0; 595 } 596 return 0; 597 } 598 599 /* 600 * Concatenate Usage Pages into Usages where relevant: 601 * As per specification, 6.2.2.8: "When the parser encounters a main item it 602 * concatenates the last declared Usage Page with a Usage to form a complete 603 * usage value." 604 */ 605 606 static void hid_concatenate_last_usage_page(struct hid_parser *parser) 607 { 608 int i; 609 unsigned int usage_page; 610 unsigned int current_page; 611 612 if (!parser->local.usage_index) 613 return; 614 615 usage_page = parser->global.usage_page; 616 617 /* 618 * Concatenate usage page again only if last declared Usage Page 619 * has not been already used in previous usages concatenation 620 */ 621 for (i = parser->local.usage_index - 1; i >= 0; i--) { 622 if (parser->local.usage_size[i] > 2) 623 /* Ignore extended usages */ 624 continue; 625 626 current_page = parser->local.usage[i] >> 16; 627 if (current_page == usage_page) 628 break; 629 630 complete_usage(parser, i); 631 } 632 } 633 634 /* 635 * Process a main item. 636 */ 637 638 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) 639 { 640 __u32 data; 641 int ret; 642 643 hid_concatenate_last_usage_page(parser); 644 645 data = item_udata(item); 646 647 switch (item->tag) { 648 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 649 ret = open_collection(parser, data & 0xff); 650 break; 651 case HID_MAIN_ITEM_TAG_END_COLLECTION: 652 ret = close_collection(parser); 653 break; 654 case HID_MAIN_ITEM_TAG_INPUT: 655 ret = hid_add_field(parser, HID_INPUT_REPORT, data); 656 break; 657 case HID_MAIN_ITEM_TAG_OUTPUT: 658 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data); 659 break; 660 case HID_MAIN_ITEM_TAG_FEATURE: 661 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 662 break; 663 default: 664 if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN && 665 item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX) 666 hid_warn_ratelimited(parser->device, "reserved main item tag 0x%x\n", item->tag); 667 else 668 hid_warn_ratelimited(parser->device, "unknown main item tag 0x%x\n", item->tag); 669 ret = 0; 670 } 671 672 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */ 673 674 return ret; 675 } 676 677 /* 678 * Process a reserved item. 679 */ 680 681 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item) 682 { 683 dbg_hid("reserved item type, tag 0x%x\n", item->tag); 684 return 0; 685 } 686 687 /* 688 * Free a report and all registered fields. The field->usage and 689 * field->value table's are allocated behind the field, so we need 690 * only to free(field) itself. 691 */ 692 693 static void hid_free_report(struct hid_report *report) 694 { 695 unsigned n; 696 697 kfree(report->field_entries); 698 699 for (n = 0; n < report->maxfield; n++) 700 kvfree(report->field[n]); 701 kfree(report); 702 } 703 704 /* 705 * Close report. 
This function returns the device 706 * state to the point prior to hid_open_report(). 707 */ 708 static void hid_close_report(struct hid_device *device) 709 { 710 unsigned i, j; 711 712 for (i = 0; i < HID_REPORT_TYPES; i++) { 713 struct hid_report_enum *report_enum = device->report_enum + i; 714 715 for (j = 0; j < HID_MAX_IDS; j++) { 716 struct hid_report *report = report_enum->report_id_hash[j]; 717 if (report) 718 hid_free_report(report); 719 } 720 memset(report_enum, 0, sizeof(*report_enum)); 721 INIT_LIST_HEAD(&report_enum->report_list); 722 } 723 724 /* 725 * If the HID driver had a rdesc_fixup() callback, dev->rdesc 726 * will be allocated by hid-core and needs to be freed. 727 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in 728 * which cases it'll be freed later on device removal or destroy. 729 */ 730 if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc) 731 kfree(device->rdesc); 732 device->rdesc = NULL; 733 device->rsize = 0; 734 735 kfree(device->collection); 736 device->collection = NULL; 737 device->collection_size = 0; 738 device->maxcollection = 0; 739 device->maxapplication = 0; 740 741 device->status &= ~HID_STAT_PARSED; 742 } 743 744 static inline void hid_free_bpf_rdesc(struct hid_device *hdev) 745 { 746 /* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */ 747 if (hdev->bpf_rdesc != hdev->dev_rdesc) 748 kfree(hdev->bpf_rdesc); 749 hdev->bpf_rdesc = NULL; 750 } 751 752 /* 753 * Free a device structure, all reports, and all fields. 754 */ 755 756 void hiddev_free(struct kref *ref) 757 { 758 struct hid_device *hid = container_of(ref, struct hid_device, ref); 759 760 hid_close_report(hid); 761 hid_free_bpf_rdesc(hid); 762 kfree(hid->dev_rdesc); 763 kfree(hid); 764 } 765 766 static void hid_device_release(struct device *dev) 767 { 768 struct hid_device *hid = to_hid_device(dev); 769 770 kref_put(&hid->ref, hiddev_free); 771 } 772 773 /* 774 * Fetch a report description item from the data stream. We support long 775 * items, though they are not used yet. 
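 *
 * Short items encode everything in a single prefix byte: bits 0-1 are
 * the data size (0, 1, 2 or 4 bytes), bits 2-3 the type and bits 4-7
 * the tag. For instance the common descriptor prefix 0x05 is a global
 * item (Usage Page) followed by one data byte. A tag of 0xf selects
 * the long item format (1-byte size, then 1-byte tag, then data).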
776 */ 777 778 static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item) 779 { 780 u8 b; 781 782 if ((end - start) <= 0) 783 return NULL; 784 785 b = *start++; 786 787 item->type = (b >> 2) & 3; 788 item->tag = (b >> 4) & 15; 789 790 if (item->tag == HID_ITEM_TAG_LONG) { 791 792 item->format = HID_ITEM_FORMAT_LONG; 793 794 if ((end - start) < 2) 795 return NULL; 796 797 item->size = *start++; 798 item->tag = *start++; 799 800 if ((end - start) < item->size) 801 return NULL; 802 803 item->data.longdata = start; 804 start += item->size; 805 return start; 806 } 807 808 item->format = HID_ITEM_FORMAT_SHORT; 809 item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */ 810 811 if (end - start < item->size) 812 return NULL; 813 814 switch (item->size) { 815 case 0: 816 break; 817 818 case 1: 819 item->data.u8 = *start; 820 break; 821 822 case 2: 823 item->data.u16 = get_unaligned_le16(start); 824 break; 825 826 case 4: 827 item->data.u32 = get_unaligned_le32(start); 828 break; 829 } 830 831 return start + item->size; 832 } 833 834 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage) 835 { 836 struct hid_device *hid = parser->device; 837 838 if (usage == HID_DG_CONTACTID) 839 hid->group = HID_GROUP_MULTITOUCH; 840 } 841 842 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) 843 { 844 if (usage == 0xff0000c5 && parser->global.report_count == 256 && 845 parser->global.report_size == 8) 846 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 847 848 if (usage == 0xff0000c6 && parser->global.report_count == 1 && 849 parser->global.report_size == 8) 850 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 851 } 852 853 static void hid_scan_collection(struct hid_parser *parser, unsigned type) 854 { 855 struct hid_device *hid = parser->device; 856 int i; 857 858 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && 859 (type == HID_COLLECTION_PHYSICAL || 860 type == HID_COLLECTION_APPLICATION)) 861 hid->group = HID_GROUP_SENSOR_HUB; 862 863 if (hid->vendor == USB_VENDOR_ID_MICROSOFT && 864 hid->product == USB_DEVICE_ID_MS_POWER_COVER && 865 hid->group == HID_GROUP_MULTITOUCH) 866 hid->group = HID_GROUP_GENERIC; 867 868 if ((parser->global.usage_page << 16) == HID_UP_GENDESK) 869 for (i = 0; i < parser->local.usage_index; i++) 870 if (parser->local.usage[i] == HID_GD_POINTER) 871 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER; 872 873 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR) 874 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC; 875 876 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR) 877 for (i = 0; i < parser->local.usage_index; i++) 878 if (parser->local.usage[i] == 879 (HID_UP_GOOGLEVENDOR | 0x0001)) 880 parser->device->group = 881 HID_GROUP_VIVALDI; 882 } 883 884 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) 885 { 886 __u32 data; 887 int i; 888 889 hid_concatenate_last_usage_page(parser); 890 891 data = item_udata(item); 892 893 switch (item->tag) { 894 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 895 hid_scan_collection(parser, data & 0xff); 896 break; 897 case HID_MAIN_ITEM_TAG_END_COLLECTION: 898 break; 899 case HID_MAIN_ITEM_TAG_INPUT: 900 /* ignore constant inputs, they will be ignored by hid-input */ 901 if (data & HID_MAIN_ITEM_CONSTANT) 902 break; 903 for (i = 0; i < parser->local.usage_index; i++) 904 hid_scan_input_usage(parser, parser->local.usage[i]); 905 break; 906 case HID_MAIN_ITEM_TAG_OUTPUT: 907 break; 908 case HID_MAIN_ITEM_TAG_FEATURE: 909 for (i = 0; i < 
parser->local.usage_index; i++) 910 hid_scan_feature_usage(parser, parser->local.usage[i]); 911 break; 912 } 913 914 /* Reset the local parser environment */ 915 memset(&parser->local, 0, sizeof(parser->local)); 916 917 return 0; 918 } 919 920 /* 921 * Scan a report descriptor before the device is added to the bus. 922 * Sets device groups and other properties that determine what driver 923 * to load. 924 */ 925 static int hid_scan_report(struct hid_device *hid) 926 { 927 struct hid_parser *parser; 928 struct hid_item item; 929 const __u8 *start = hid->dev_rdesc; 930 const __u8 *end = start + hid->dev_rsize; 931 static int (*dispatch_type[])(struct hid_parser *parser, 932 struct hid_item *item) = { 933 hid_scan_main, 934 hid_parser_global, 935 hid_parser_local, 936 hid_parser_reserved 937 }; 938 939 parser = vzalloc(sizeof(struct hid_parser)); 940 if (!parser) 941 return -ENOMEM; 942 943 parser->device = hid; 944 hid->group = HID_GROUP_GENERIC; 945 946 /* 947 * The parsing is simpler than the one in hid_open_report() as we should 948 * be robust against hid errors. Those errors will be raised by 949 * hid_open_report() anyway. 950 */ 951 while ((start = fetch_item(start, end, &item)) != NULL) 952 dispatch_type[item.type](parser, &item); 953 954 /* 955 * Handle special flags set during scanning. 956 */ 957 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) && 958 (hid->group == HID_GROUP_MULTITOUCH)) 959 hid->group = HID_GROUP_MULTITOUCH_WIN_8; 960 961 /* 962 * Vendor specific handlings 963 */ 964 switch (hid->vendor) { 965 case USB_VENDOR_ID_WACOM: 966 hid->group = HID_GROUP_WACOM; 967 break; 968 case USB_VENDOR_ID_SYNAPTICS: 969 if (hid->group == HID_GROUP_GENERIC) 970 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 971 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 972 /* 973 * hid-rmi should take care of them, 974 * not hid-generic 975 */ 976 hid->group = HID_GROUP_RMI; 977 break; 978 } 979 980 kfree(parser->collection_stack); 981 vfree(parser); 982 return 0; 983 } 984 985 /** 986 * hid_parse_report - parse device report 987 * 988 * @hid: hid device 989 * @start: report start 990 * @size: report size 991 * 992 * Allocate the device report as read by the bus driver. This function should 993 * only be called from parse() in ll drivers. 994 */ 995 int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size) 996 { 997 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); 998 if (!hid->dev_rdesc) 999 return -ENOMEM; 1000 hid->dev_rsize = size; 1001 return 0; 1002 } 1003 EXPORT_SYMBOL_GPL(hid_parse_report); 1004 1005 static const char * const hid_report_names[] = { 1006 "HID_INPUT_REPORT", 1007 "HID_OUTPUT_REPORT", 1008 "HID_FEATURE_REPORT", 1009 }; 1010 /** 1011 * hid_validate_values - validate existing device report's value indexes 1012 * 1013 * @hid: hid device 1014 * @type: which report type to examine 1015 * @id: which report ID to examine (0 for first) 1016 * @field_index: which report field to examine 1017 * @report_counts: expected number of values 1018 * 1019 * Validate the number of values in a given field of a given report, after 1020 * parsing. 
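 *
 * Return: the validated report on success, or NULL if the report, field
 * or value count is missing or out of range.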
1021 */ 1022 struct hid_report *hid_validate_values(struct hid_device *hid, 1023 enum hid_report_type type, unsigned int id, 1024 unsigned int field_index, 1025 unsigned int report_counts) 1026 { 1027 struct hid_report *report; 1028 1029 if (type > HID_FEATURE_REPORT) { 1030 hid_err(hid, "invalid HID report type %u\n", type); 1031 return NULL; 1032 } 1033 1034 if (id >= HID_MAX_IDS) { 1035 hid_err(hid, "invalid HID report id %u\n", id); 1036 return NULL; 1037 } 1038 1039 /* 1040 * Explicitly not using hid_get_report() here since it depends on 1041 * ->numbered being checked, which may not always be the case when 1042 * drivers go to access report values. 1043 */ 1044 if (id == 0) { 1045 /* 1046 * Validating on id 0 means we should examine the first 1047 * report in the list. 1048 */ 1049 report = list_first_entry_or_null( 1050 &hid->report_enum[type].report_list, 1051 struct hid_report, list); 1052 } else { 1053 report = hid->report_enum[type].report_id_hash[id]; 1054 } 1055 if (!report) { 1056 hid_err(hid, "missing %s %u\n", hid_report_names[type], id); 1057 return NULL; 1058 } 1059 if (report->maxfield <= field_index) { 1060 hid_err(hid, "not enough fields in %s %u\n", 1061 hid_report_names[type], id); 1062 return NULL; 1063 } 1064 if (report->field[field_index]->report_count < report_counts) { 1065 hid_err(hid, "not enough values in %s %u field %u\n", 1066 hid_report_names[type], id, field_index); 1067 return NULL; 1068 } 1069 return report; 1070 } 1071 EXPORT_SYMBOL_GPL(hid_validate_values); 1072 1073 static int hid_calculate_multiplier(struct hid_device *hid, 1074 struct hid_field *multiplier) 1075 { 1076 int m; 1077 __s32 v = *multiplier->value; 1078 __s32 lmin = multiplier->logical_minimum; 1079 __s32 lmax = multiplier->logical_maximum; 1080 __s32 pmin = multiplier->physical_minimum; 1081 __s32 pmax = multiplier->physical_maximum; 1082 1083 /* 1084 * "Because OS implementations will generally divide the control's 1085 * reported count by the Effective Resolution Multiplier, designers 1086 * should take care not to establish a potential Effective 1087 * Resolution Multiplier of zero." 1088 * HID Usage Table, v1.12, Section 4.3.1, p31 1089 */ 1090 if (lmax - lmin == 0) 1091 return 1; 1092 /* 1093 * Handling the unit exponent is left as an exercise to whoever 1094 * finds a device where that exponent is not 0. 1095 */ 1096 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin); 1097 if (unlikely(multiplier->unit_exponent != 0)) { 1098 hid_warn(hid, 1099 "unsupported Resolution Multiplier unit exponent %d\n", 1100 multiplier->unit_exponent); 1101 } 1102 1103 /* There are no devices with an effective multiplier > 255 */ 1104 if (unlikely(m == 0 || m > 255 || m < -255)) { 1105 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m); 1106 m = 1; 1107 } 1108 1109 return m; 1110 } 1111 1112 static void hid_apply_multiplier_to_field(struct hid_device *hid, 1113 struct hid_field *field, 1114 struct hid_collection *multiplier_collection, 1115 int effective_multiplier) 1116 { 1117 struct hid_collection *collection; 1118 struct hid_usage *usage; 1119 int i; 1120 1121 /* 1122 * If multiplier_collection is NULL, the multiplier applies 1123 * to all fields in the report. 1124 * Otherwise, it is the Logical Collection the multiplier applies to 1125 * but our field may be in a subcollection of that collection. 
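 *
 * So walk each usage's collection chain towards the root: if the walk
 * stops on multiplier_collection before reaching a root collection,
 * the usage lives inside that Logical Collection and inherits the
 * effective multiplier.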
1126 */ 1127 for (i = 0; i < field->maxusage; i++) { 1128 usage = &field->usage[i]; 1129 1130 collection = &hid->collection[usage->collection_index]; 1131 while (collection->parent_idx != -1 && 1132 collection != multiplier_collection) 1133 collection = &hid->collection[collection->parent_idx]; 1134 1135 if (collection->parent_idx != -1 || 1136 multiplier_collection == NULL) 1137 usage->resolution_multiplier = effective_multiplier; 1138 1139 } 1140 } 1141 1142 static void hid_apply_multiplier(struct hid_device *hid, 1143 struct hid_field *multiplier) 1144 { 1145 struct hid_report_enum *rep_enum; 1146 struct hid_report *rep; 1147 struct hid_field *field; 1148 struct hid_collection *multiplier_collection; 1149 int effective_multiplier; 1150 int i; 1151 1152 /* 1153 * "The Resolution Multiplier control must be contained in the same 1154 * Logical Collection as the control(s) to which it is to be applied. 1155 * If no Resolution Multiplier is defined, then the Resolution 1156 * Multiplier defaults to 1. If more than one control exists in a 1157 * Logical Collection, the Resolution Multiplier is associated with 1158 * all controls in the collection. If no Logical Collection is 1159 * defined, the Resolution Multiplier is associated with all 1160 * controls in the report." 1161 * HID Usage Table, v1.12, Section 4.3.1, p30 1162 * 1163 * Thus, search from the current collection upwards until we find a 1164 * logical collection. Then search all fields for that same parent 1165 * collection. Those are the fields the multiplier applies to. 1166 * 1167 * If we have more than one multiplier, it will overwrite the 1168 * applicable fields later. 1169 */ 1170 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1171 while (multiplier_collection->parent_idx != -1 && 1172 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1173 multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; 1174 if (multiplier_collection->type != HID_COLLECTION_LOGICAL) 1175 multiplier_collection = NULL; 1176 1177 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1178 1179 rep_enum = &hid->report_enum[HID_INPUT_REPORT]; 1180 list_for_each_entry(rep, &rep_enum->report_list, list) { 1181 for (i = 0; i < rep->maxfield; i++) { 1182 field = rep->field[i]; 1183 hid_apply_multiplier_to_field(hid, field, 1184 multiplier_collection, 1185 effective_multiplier); 1186 } 1187 } 1188 } 1189 1190 /* 1191 * hid_setup_resolution_multiplier - set up all resolution multipliers 1192 * 1193 * @device: hid device 1194 * 1195 * Search for all Resolution Multiplier Feature Reports and apply their 1196 * value to all matching Input items. This only updates the internal struct 1197 * fields. 1198 * 1199 * The Resolution Multiplier is applied by the hardware. If the multiplier 1200 * is anything other than 1, the hardware will send pre-multiplied events 1201 * so that the same physical interaction generates an accumulated 1202 * accumulated_value = value * * multiplier 1203 * This may be achieved by sending 1204 * - "value * multiplier" for each event, or 1205 * - "value" but "multiplier" times as frequently, or 1206 * - a combination of the above 1207 * The only guarantee is that the same physical interaction always generates 1208 * an accumulated 'value * multiplier'. 1209 * 1210 * This function must be called before any event processing and after 1211 * any SetRequest to the Resolution Multiplier. 
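 *
 * As an illustration, a high-resolution wheel whose Resolution
 * Multiplier feature declares logical 0..1 and physical 1..4 reports
 * an effective multiplier of 4 when the feature value is 1 and of 1
 * when it is 0 (see hid_calculate_multiplier()).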
1212 */ 1213 void hid_setup_resolution_multiplier(struct hid_device *hid) 1214 { 1215 struct hid_report_enum *rep_enum; 1216 struct hid_report *rep; 1217 struct hid_usage *usage; 1218 int i, j; 1219 1220 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1221 list_for_each_entry(rep, &rep_enum->report_list, list) { 1222 for (i = 0; i < rep->maxfield; i++) { 1223 /* Ignore if report count is out of bounds. */ 1224 if (rep->field[i]->report_count < 1) 1225 continue; 1226 1227 for (j = 0; j < rep->field[i]->maxusage; j++) { 1228 usage = &rep->field[i]->usage[j]; 1229 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) 1230 hid_apply_multiplier(hid, 1231 rep->field[i]); 1232 } 1233 } 1234 } 1235 } 1236 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier); 1237 1238 /** 1239 * hid_open_report - open a driver-specific device report 1240 * 1241 * @device: hid device 1242 * 1243 * Parse a report description into a hid_device structure. Reports are 1244 * enumerated, fields are attached to these reports. 1245 * 0 returned on success, otherwise nonzero error value. 1246 * 1247 * This function (or the equivalent hid_parse() macro) should only be 1248 * called from probe() in drivers, before starting the device. 1249 */ 1250 int hid_open_report(struct hid_device *device) 1251 { 1252 struct hid_parser *parser; 1253 struct hid_item item; 1254 unsigned int size; 1255 const __u8 *start; 1256 const __u8 *end; 1257 const __u8 *next; 1258 int ret; 1259 int i; 1260 static int (*dispatch_type[])(struct hid_parser *parser, 1261 struct hid_item *item) = { 1262 hid_parser_main, 1263 hid_parser_global, 1264 hid_parser_local, 1265 hid_parser_reserved 1266 }; 1267 1268 if (WARN_ON(device->status & HID_STAT_PARSED)) 1269 return -EBUSY; 1270 1271 start = device->bpf_rdesc; 1272 if (WARN_ON(!start)) 1273 return -ENODEV; 1274 size = device->bpf_rsize; 1275 1276 if (device->driver->report_fixup) { 1277 /* 1278 * device->driver->report_fixup() needs to work 1279 * on a copy of our report descriptor so it can 1280 * change it. 1281 */ 1282 __u8 *buf = kmemdup(start, size, GFP_KERNEL); 1283 1284 if (buf == NULL) 1285 return -ENOMEM; 1286 1287 start = device->driver->report_fixup(device, buf, &size); 1288 1289 /* 1290 * The second kmemdup is required in case report_fixup() returns 1291 * a static read-only memory, but we have no idea if that memory 1292 * needs to be cleaned up or not at the end. 
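 *
 * Either way device->rdesc then points at memory owned by hid-core,
 * which hid_close_report() recognises (it differs from dev_rdesc and
 * bpf_rdesc) and frees.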
1293 */ 1294 start = kmemdup(start, size, GFP_KERNEL); 1295 kfree(buf); 1296 if (start == NULL) 1297 return -ENOMEM; 1298 } 1299 1300 device->rdesc = start; 1301 device->rsize = size; 1302 1303 parser = vzalloc(sizeof(struct hid_parser)); 1304 if (!parser) { 1305 ret = -ENOMEM; 1306 goto alloc_err; 1307 } 1308 1309 parser->device = device; 1310 1311 end = start + size; 1312 1313 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS, 1314 sizeof(struct hid_collection), GFP_KERNEL); 1315 if (!device->collection) { 1316 ret = -ENOMEM; 1317 goto err; 1318 } 1319 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; 1320 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++) 1321 device->collection[i].parent_idx = -1; 1322 1323 ret = -EINVAL; 1324 while ((next = fetch_item(start, end, &item)) != NULL) { 1325 start = next; 1326 1327 if (item.format != HID_ITEM_FORMAT_SHORT) { 1328 hid_err(device, "unexpected long global item\n"); 1329 goto err; 1330 } 1331 1332 if (dispatch_type[item.type](parser, &item)) { 1333 hid_err(device, "item %u %u %u %u parsing failed\n", 1334 item.format, (unsigned)item.size, 1335 (unsigned)item.type, (unsigned)item.tag); 1336 goto err; 1337 } 1338 1339 if (start == end) { 1340 if (parser->collection_stack_ptr) { 1341 hid_err(device, "unbalanced collection at end of report description\n"); 1342 goto err; 1343 } 1344 if (parser->local.delimiter_depth) { 1345 hid_err(device, "unbalanced delimiter at end of report description\n"); 1346 goto err; 1347 } 1348 1349 /* 1350 * fetch initial values in case the device's 1351 * default multiplier isn't the recommended 1 1352 */ 1353 hid_setup_resolution_multiplier(device); 1354 1355 kfree(parser->collection_stack); 1356 vfree(parser); 1357 device->status |= HID_STAT_PARSED; 1358 1359 return 0; 1360 } 1361 } 1362 1363 hid_err(device, "item fetching failed at offset %u/%u\n", 1364 size - (unsigned int)(end - start), size); 1365 err: 1366 kfree(parser->collection_stack); 1367 alloc_err: 1368 vfree(parser); 1369 hid_close_report(device); 1370 return ret; 1371 } 1372 EXPORT_SYMBOL_GPL(hid_open_report); 1373 1374 /* 1375 * Extract/implement a data field from/to a little endian report (bit array). 1376 * 1377 * Code sort-of follows HID spec: 1378 * http://www.usb.org/developers/hidpage/HID1_11.pdf 1379 * 1380 * While the USB HID spec allows unlimited length bit fields in "report 1381 * descriptors", most devices never use more than 16 bits. 1382 * One model of UPS is claimed to report "LINEV" as a 32-bit field. 1383 * Search linux-kernel and linux-usb-devel archives for "hid-core extract". 1384 */ 1385 1386 static u32 __extract(u8 *report, unsigned offset, int n) 1387 { 1388 unsigned int idx = offset / 8; 1389 unsigned int bit_nr = 0; 1390 unsigned int bit_shift = offset % 8; 1391 int bits_to_copy = 8 - bit_shift; 1392 u32 value = 0; 1393 u32 mask = n < 32 ? (1U << n) - 1 : ~0U; 1394 1395 while (n > 0) { 1396 value |= ((u32)report[idx] >> bit_shift) << bit_nr; 1397 n -= bits_to_copy; 1398 bit_nr += bits_to_copy; 1399 bits_to_copy = 8; 1400 bit_shift = 0; 1401 idx++; 1402 } 1403 1404 return value & mask; 1405 } 1406 1407 u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1408 unsigned offset, unsigned n) 1409 { 1410 if (n > 32) { 1411 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n", 1412 __func__, n, current->comm); 1413 n = 32; 1414 } 1415 1416 return __extract(report, offset, n); 1417 } 1418 EXPORT_SYMBOL_GPL(hid_field_extract); 1419 1420 /* 1421 * "implement" : set bits in a little endian bit stream. 
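 * For instance, implementing a 12-bit value at bit offset 4 touches the
 * high four bits of the first byte and all eight bits of the second;
 * implement() masks off (and warns about) value bits that do not fit
 * in the field.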
1422 * Same concepts as "extract" (see comments above). 1423 * The data mangled in the bit stream remains in little endian 1424 * order the whole time. It make more sense to talk about 1425 * endianness of register values by considering a register 1426 * a "cached" copy of the little endian bit stream. 1427 */ 1428 1429 static void __implement(u8 *report, unsigned offset, int n, u32 value) 1430 { 1431 unsigned int idx = offset / 8; 1432 unsigned int bit_shift = offset % 8; 1433 int bits_to_set = 8 - bit_shift; 1434 1435 while (n - bits_to_set >= 0) { 1436 report[idx] &= ~(0xff << bit_shift); 1437 report[idx] |= value << bit_shift; 1438 value >>= bits_to_set; 1439 n -= bits_to_set; 1440 bits_to_set = 8; 1441 bit_shift = 0; 1442 idx++; 1443 } 1444 1445 /* last nibble */ 1446 if (n) { 1447 u8 bit_mask = ((1U << n) - 1); 1448 report[idx] &= ~(bit_mask << bit_shift); 1449 report[idx] |= value << bit_shift; 1450 } 1451 } 1452 1453 static void implement(const struct hid_device *hid, u8 *report, 1454 unsigned offset, unsigned n, u32 value) 1455 { 1456 if (unlikely(n > 32)) { 1457 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n", 1458 __func__, n, current->comm); 1459 n = 32; 1460 } else if (n < 32) { 1461 u32 m = (1U << n) - 1; 1462 1463 if (unlikely(value > m)) { 1464 hid_warn(hid, 1465 "%s() called with too large value %d (n: %d)! (%s)\n", 1466 __func__, value, n, current->comm); 1467 value &= m; 1468 } 1469 } 1470 1471 __implement(report, offset, n, value); 1472 } 1473 1474 /* 1475 * Search an array for a value. 1476 */ 1477 1478 static int search(__s32 *array, __s32 value, unsigned n) 1479 { 1480 while (n--) { 1481 if (*array++ == value) 1482 return 0; 1483 } 1484 return -1; 1485 } 1486 1487 /** 1488 * hid_match_report - check if driver's raw_event should be called 1489 * 1490 * @hid: hid device 1491 * @report: hid report to match against 1492 * 1493 * compare hid->driver->report_table->report_type to report->type 1494 */ 1495 static int hid_match_report(struct hid_device *hid, struct hid_report *report) 1496 { 1497 const struct hid_report_id *id = hid->driver->report_table; 1498 1499 if (!id) /* NULL means all */ 1500 return 1; 1501 1502 for (; id->report_type != HID_TERMINATOR; id++) 1503 if (id->report_type == HID_ANY_ID || 1504 id->report_type == report->type) 1505 return 1; 1506 return 0; 1507 } 1508 1509 /** 1510 * hid_match_usage - check if driver's event should be called 1511 * 1512 * @hid: hid device 1513 * @usage: usage to match against 1514 * 1515 * compare hid->driver->usage_table->usage_{type,code} to 1516 * usage->usage_{type,code} 1517 */ 1518 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage) 1519 { 1520 const struct hid_usage_id *id = hid->driver->usage_table; 1521 1522 if (!id) /* NULL means all */ 1523 return 1; 1524 1525 for (; id->usage_type != HID_ANY_ID - 1; id++) 1526 if ((id->usage_hid == HID_ANY_ID || 1527 id->usage_hid == usage->hid) && 1528 (id->usage_type == HID_ANY_ID || 1529 id->usage_type == usage->type) && 1530 (id->usage_code == HID_ANY_ID || 1531 id->usage_code == usage->code)) 1532 return 1; 1533 return 0; 1534 } 1535 1536 static void hid_process_event(struct hid_device *hid, struct hid_field *field, 1537 struct hid_usage *usage, __s32 value, int interrupt) 1538 { 1539 struct hid_driver *hdrv = hid->driver; 1540 int ret; 1541 1542 if (!list_empty(&hid->debug_list)) 1543 hid_dump_input(hid, usage, value); 1544 1545 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1546 ret = hdrv->event(hid, field, usage, value); 1547 if 
(ret != 0) { 1548 if (ret < 0) 1549 hid_err(hid, "%s's event failed with %d\n", 1550 hdrv->name, ret); 1551 return; 1552 } 1553 } 1554 1555 if (hid->claimed & HID_CLAIMED_INPUT) 1556 hidinput_hid_event(hid, field, usage, value); 1557 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event) 1558 hid->hiddev_hid_event(hid, field, usage, value); 1559 } 1560 1561 /* 1562 * Checks if the given value is valid within this field 1563 */ 1564 static inline int hid_array_value_is_valid(struct hid_field *field, 1565 __s32 value) 1566 { 1567 __s32 min = field->logical_minimum; 1568 1569 /* 1570 * Value needs to be between logical min and max, and 1571 * (value - min) is used as an index in the usage array. 1572 * This array is of size field->maxusage 1573 */ 1574 return value >= min && 1575 value <= field->logical_maximum && 1576 value - min < field->maxusage; 1577 } 1578 1579 /* 1580 * Fetch the field from the data. The field content is stored for next 1581 * report processing (we do differential reporting to the layer). 1582 */ 1583 static void hid_input_fetch_field(struct hid_device *hid, 1584 struct hid_field *field, 1585 __u8 *data) 1586 { 1587 unsigned n; 1588 unsigned count = field->report_count; 1589 unsigned offset = field->report_offset; 1590 unsigned size = field->report_size; 1591 __s32 min = field->logical_minimum; 1592 __s32 *value; 1593 1594 value = field->new_value; 1595 memset(value, 0, count * sizeof(__s32)); 1596 field->ignored = false; 1597 1598 for (n = 0; n < count; n++) { 1599 1600 value[n] = min < 0 ? 1601 snto32(hid_field_extract(hid, data, offset + n * size, 1602 size), size) : 1603 hid_field_extract(hid, data, offset + n * size, size); 1604 1605 /* Ignore report if ErrorRollOver */ 1606 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && 1607 hid_array_value_is_valid(field, value[n]) && 1608 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) { 1609 field->ignored = true; 1610 return; 1611 } 1612 } 1613 } 1614 1615 /* 1616 * Process a received variable field. 1617 */ 1618 1619 static void hid_input_var_field(struct hid_device *hid, 1620 struct hid_field *field, 1621 int interrupt) 1622 { 1623 unsigned int count = field->report_count; 1624 __s32 *value = field->new_value; 1625 unsigned int n; 1626 1627 for (n = 0; n < count; n++) 1628 hid_process_event(hid, 1629 field, 1630 &field->usage[n], 1631 value[n], 1632 interrupt); 1633 1634 memcpy(field->value, value, count * sizeof(__s32)); 1635 } 1636 1637 /* 1638 * Process a received array field. The field content is stored for 1639 * next report processing (we do differential reporting to the layer). 
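 *
 * Only differences are reported: an old value that is no longer present
 * in the new report generates a 0 (release) event for its usage, and a
 * new value not present in the old report generates a 1 (press) event,
 * e.g. keyboard array reports where keys appear and disappear.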
1640 */ 1641 1642 static void hid_input_array_field(struct hid_device *hid, 1643 struct hid_field *field, 1644 int interrupt) 1645 { 1646 unsigned int n; 1647 unsigned int count = field->report_count; 1648 __s32 min = field->logical_minimum; 1649 __s32 *value; 1650 1651 value = field->new_value; 1652 1653 /* ErrorRollOver */ 1654 if (field->ignored) 1655 return; 1656 1657 for (n = 0; n < count; n++) { 1658 if (hid_array_value_is_valid(field, field->value[n]) && 1659 search(value, field->value[n], count)) 1660 hid_process_event(hid, 1661 field, 1662 &field->usage[field->value[n] - min], 1663 0, 1664 interrupt); 1665 1666 if (hid_array_value_is_valid(field, value[n]) && 1667 search(field->value, value[n], count)) 1668 hid_process_event(hid, 1669 field, 1670 &field->usage[value[n] - min], 1671 1, 1672 interrupt); 1673 } 1674 1675 memcpy(field->value, value, count * sizeof(__s32)); 1676 } 1677 1678 /* 1679 * Analyse a received report, and fetch the data from it. The field 1680 * content is stored for next report processing (we do differential 1681 * reporting to the layer). 1682 */ 1683 static void hid_process_report(struct hid_device *hid, 1684 struct hid_report *report, 1685 __u8 *data, 1686 int interrupt) 1687 { 1688 unsigned int a; 1689 struct hid_field_entry *entry; 1690 struct hid_field *field; 1691 1692 /* first retrieve all incoming values in data */ 1693 for (a = 0; a < report->maxfield; a++) 1694 hid_input_fetch_field(hid, report->field[a], data); 1695 1696 if (!list_empty(&report->field_entry_list)) { 1697 /* INPUT_REPORT, we have a priority list of fields */ 1698 list_for_each_entry(entry, 1699 &report->field_entry_list, 1700 list) { 1701 field = entry->field; 1702 1703 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1704 hid_process_event(hid, 1705 field, 1706 &field->usage[entry->index], 1707 field->new_value[entry->index], 1708 interrupt); 1709 else 1710 hid_input_array_field(hid, field, interrupt); 1711 } 1712 1713 /* we need to do the memcpy at the end for var items */ 1714 for (a = 0; a < report->maxfield; a++) { 1715 field = report->field[a]; 1716 1717 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1718 memcpy(field->value, field->new_value, 1719 field->report_count * sizeof(__s32)); 1720 } 1721 } else { 1722 /* FEATURE_REPORT, regular processing */ 1723 for (a = 0; a < report->maxfield; a++) { 1724 field = report->field[a]; 1725 1726 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1727 hid_input_var_field(hid, field, interrupt); 1728 else 1729 hid_input_array_field(hid, field, interrupt); 1730 } 1731 } 1732 } 1733 1734 /* 1735 * Insert a given usage_index in a field in the list 1736 * of processed usages in the report. 1737 * 1738 * The elements of lower priority score are processed 1739 * first. 
1740 */ 1741 static void __hid_insert_field_entry(struct hid_device *hid, 1742 struct hid_report *report, 1743 struct hid_field_entry *entry, 1744 struct hid_field *field, 1745 unsigned int usage_index) 1746 { 1747 struct hid_field_entry *next; 1748 1749 entry->field = field; 1750 entry->index = usage_index; 1751 entry->priority = field->usages_priorities[usage_index]; 1752 1753 /* insert the element at the correct position */ 1754 list_for_each_entry(next, 1755 &report->field_entry_list, 1756 list) { 1757 /* 1758 * the priority of our element is strictly higher 1759 * than the next one, insert it before 1760 */ 1761 if (entry->priority > next->priority) { 1762 list_add_tail(&entry->list, &next->list); 1763 return; 1764 } 1765 } 1766 1767 /* lowest priority score: insert at the end */ 1768 list_add_tail(&entry->list, &report->field_entry_list); 1769 } 1770 1771 static void hid_report_process_ordering(struct hid_device *hid, 1772 struct hid_report *report) 1773 { 1774 struct hid_field *field; 1775 struct hid_field_entry *entries; 1776 unsigned int a, u, usages; 1777 unsigned int count = 0; 1778 1779 /* count the number of individual fields in the report */ 1780 for (a = 0; a < report->maxfield; a++) { 1781 field = report->field[a]; 1782 1783 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1784 count += field->report_count; 1785 else 1786 count++; 1787 } 1788 1789 /* allocate the memory to process the fields */ 1790 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); 1791 if (!entries) 1792 return; 1793 1794 report->field_entries = entries; 1795 1796 /* 1797 * walk through all fields in the report and 1798 * store them by priority order in report->field_entry_list 1799 * 1800 * - Var elements are individualized (field + usage_index) 1801 * - Arrays are taken as one, we can not chose an order for them 1802 */ 1803 usages = 0; 1804 for (a = 0; a < report->maxfield; a++) { 1805 field = report->field[a]; 1806 1807 if (field->flags & HID_MAIN_ITEM_VARIABLE) { 1808 for (u = 0; u < field->report_count; u++) { 1809 __hid_insert_field_entry(hid, report, 1810 &entries[usages], 1811 field, u); 1812 usages++; 1813 } 1814 } else { 1815 __hid_insert_field_entry(hid, report, &entries[usages], 1816 field, 0); 1817 usages++; 1818 } 1819 } 1820 } 1821 1822 static void hid_process_ordering(struct hid_device *hid) 1823 { 1824 struct hid_report *report; 1825 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT]; 1826 1827 list_for_each_entry(report, &report_enum->report_list, list) 1828 hid_report_process_ordering(hid, report); 1829 } 1830 1831 /* 1832 * Output the field into the report. 1833 */ 1834 1835 static void hid_output_field(const struct hid_device *hid, 1836 struct hid_field *field, __u8 *data) 1837 { 1838 unsigned count = field->report_count; 1839 unsigned offset = field->report_offset; 1840 unsigned size = field->report_size; 1841 unsigned n; 1842 1843 for (n = 0; n < count; n++) { 1844 if (field->logical_minimum < 0) /* signed values */ 1845 implement(hid, data, offset + n * size, size, 1846 s32ton(field->value[n], size)); 1847 else /* unsigned values */ 1848 implement(hid, data, offset + n * size, size, 1849 field->value[n]); 1850 } 1851 } 1852 1853 /* 1854 * Compute the size of a report. 1855 */ 1856 static size_t hid_compute_report_size(struct hid_report *report) 1857 { 1858 if (report->size) 1859 return ((report->size - 1) >> 3) + 1; 1860 1861 return 0; 1862 } 1863 1864 /* 1865 * Create a report. 
'data' has to be allocated using 1866 * hid_alloc_report_buf() so that it has proper size. 1867 */ 1868 1869 void hid_output_report(struct hid_report *report, __u8 *data) 1870 { 1871 unsigned n; 1872 1873 if (report->id > 0) 1874 *data++ = report->id; 1875 1876 memset(data, 0, hid_compute_report_size(report)); 1877 for (n = 0; n < report->maxfield; n++) 1878 hid_output_field(report->device, report->field[n], data); 1879 } 1880 EXPORT_SYMBOL_GPL(hid_output_report); 1881 1882 /* 1883 * Allocator for buffer that is going to be passed to hid_output_report() 1884 */ 1885 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) 1886 { 1887 /* 1888 * 7 extra bytes are necessary to achieve proper functionality 1889 * of implement() working on 8 byte chunks 1890 * 1 extra byte for the report ID if it is null (not used) so 1891 * we can reserve that extra byte in the first position of the buffer 1892 * when sending it to .raw_request() 1893 */ 1894 1895 u32 len = hid_report_len(report) + 7 + (report->id == 0); 1896 1897 return kzalloc(len, flags); 1898 } 1899 EXPORT_SYMBOL_GPL(hid_alloc_report_buf); 1900 1901 /* 1902 * Set a field value. The report this field belongs to has to be 1903 * created and transferred to the device, to set this value in the 1904 * device. 1905 */ 1906 1907 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) 1908 { 1909 unsigned size; 1910 1911 if (!field) 1912 return -1; 1913 1914 size = field->report_size; 1915 1916 hid_dump_input(field->report->device, field->usage + offset, value); 1917 1918 if (offset >= field->report_count) { 1919 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n", 1920 offset, field->report_count); 1921 return -1; 1922 } 1923 if (field->logical_minimum < 0) { 1924 if (value != snto32(s32ton(value, size), size)) { 1925 hid_err(field->report->device, "value %d is out of range\n", value); 1926 return -1; 1927 } 1928 } 1929 field->value[offset] = value; 1930 return 0; 1931 } 1932 EXPORT_SYMBOL_GPL(hid_set_field); 1933 1934 struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type, 1935 unsigned int application, unsigned int usage) 1936 { 1937 struct list_head *report_list = &hdev->report_enum[report_type].report_list; 1938 struct hid_report *report; 1939 int i, j; 1940 1941 list_for_each_entry(report, report_list, list) { 1942 if (report->application != application) 1943 continue; 1944 1945 for (i = 0; i < report->maxfield; i++) { 1946 struct hid_field *field = report->field[i]; 1947 1948 for (j = 0; j < field->maxusage; j++) { 1949 if (field->usage[j].hid == usage) 1950 return field; 1951 } 1952 } 1953 } 1954 1955 return NULL; 1956 } 1957 EXPORT_SYMBOL_GPL(hid_find_field); 1958 1959 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, 1960 const u8 *data) 1961 { 1962 struct hid_report *report; 1963 unsigned int n = 0; /* Normally report number is 0 */ 1964 1965 /* Device uses numbered reports, data[0] is report number */ 1966 if (report_enum->numbered) 1967 n = *data; 1968 1969 report = report_enum->report_id_hash[n]; 1970 if (report == NULL) 1971 dbg_hid("undefined report_id %u received\n", n); 1972 1973 return report; 1974 } 1975 1976 /* 1977 * Implement a generic .request() callback, using .raw_request() 1978 * DO NOT USE in hid drivers directly, but through hid_hw_request instead. 
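 *
 * Note that even for reports without an ID the first byte of the buffer
 * is reserved for the report number expected by ->raw_request(); this is
 * the extra byte accounted for in hid_alloc_report_buf().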
1979 */ 1980 int __hid_request(struct hid_device *hid, struct hid_report *report, 1981 enum hid_class_request reqtype) 1982 { 1983 char *buf, *data_buf; 1984 int ret; 1985 u32 len; 1986 1987 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1988 if (!buf) 1989 return -ENOMEM; 1990 1991 data_buf = buf; 1992 len = hid_report_len(report); 1993 1994 if (report->id == 0) { 1995 /* reserve the first byte for the report ID */ 1996 data_buf++; 1997 len++; 1998 } 1999 2000 if (reqtype == HID_REQ_SET_REPORT) 2001 hid_output_report(report, data_buf); 2002 2003 ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype); 2004 if (ret < 0) { 2005 dbg_hid("unable to complete request: %d\n", ret); 2006 goto out; 2007 } 2008 2009 if (reqtype == HID_REQ_GET_REPORT) 2010 hid_input_report(hid, report->type, buf, ret, 0); 2011 2012 ret = 0; 2013 2014 out: 2015 kfree(buf); 2016 return ret; 2017 } 2018 EXPORT_SYMBOL_GPL(__hid_request); 2019 2020 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2021 int interrupt) 2022 { 2023 struct hid_report_enum *report_enum = hid->report_enum + type; 2024 struct hid_report *report; 2025 struct hid_driver *hdrv; 2026 int max_buffer_size = HID_MAX_BUFFER_SIZE; 2027 u32 rsize, csize = size; 2028 u8 *cdata = data; 2029 int ret = 0; 2030 2031 report = hid_get_report(report_enum, data); 2032 if (!report) 2033 goto out; 2034 2035 if (report_enum->numbered) { 2036 cdata++; 2037 csize--; 2038 } 2039 2040 rsize = hid_compute_report_size(report); 2041 2042 if (hid->ll_driver->max_buffer_size) 2043 max_buffer_size = hid->ll_driver->max_buffer_size; 2044 2045 if (report_enum->numbered && rsize >= max_buffer_size) 2046 rsize = max_buffer_size - 1; 2047 else if (rsize > max_buffer_size) 2048 rsize = max_buffer_size; 2049 2050 if (csize < rsize) { 2051 dbg_hid("report %d is too short, (%d < %d)\n", report->id, 2052 csize, rsize); 2053 memset(cdata + csize, 0, rsize - csize); 2054 } 2055 2056 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 2057 hid->hiddev_report_event(hid, report); 2058 if (hid->claimed & HID_CLAIMED_HIDRAW) { 2059 ret = hidraw_report_event(hid, data, size); 2060 if (ret) 2061 goto out; 2062 } 2063 2064 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 2065 hid_process_report(hid, report, cdata, interrupt); 2066 hdrv = hid->driver; 2067 if (hdrv && hdrv->report) 2068 hdrv->report(hid, report); 2069 } 2070 2071 if (hid->claimed & HID_CLAIMED_INPUT) 2072 hidinput_report_event(hid, report); 2073 out: 2074 return ret; 2075 } 2076 EXPORT_SYMBOL_GPL(hid_report_raw_event); 2077 2078 2079 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type, 2080 u8 *data, u32 size, int interrupt, u64 source, bool from_bpf, 2081 bool lock_already_taken) 2082 { 2083 struct hid_report_enum *report_enum; 2084 struct hid_driver *hdrv; 2085 struct hid_report *report; 2086 int ret = 0; 2087 2088 if (!hid) 2089 return -ENODEV; 2090 2091 ret = down_trylock(&hid->driver_input_lock); 2092 if (lock_already_taken && !ret) { 2093 up(&hid->driver_input_lock); 2094 return -EINVAL; 2095 } else if (!lock_already_taken && ret) { 2096 return -EBUSY; 2097 } 2098 2099 if (!hid->driver) { 2100 ret = -ENODEV; 2101 goto unlock; 2102 } 2103 report_enum = hid->report_enum + type; 2104 hdrv = hid->driver; 2105 2106 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf); 2107 if (IS_ERR(data)) { 2108 ret = PTR_ERR(data); 2109 goto unlock; 2110 } 2111 2112 if (!size) { 2113 
dbg_hid("empty report\n"); 2114 ret = -1; 2115 goto unlock; 2116 } 2117 2118 /* Avoid unnecessary overhead if debugfs is disabled */ 2119 if (!list_empty(&hid->debug_list)) 2120 hid_dump_report(hid, type, data, size); 2121 2122 report = hid_get_report(report_enum, data); 2123 2124 if (!report) { 2125 ret = -1; 2126 goto unlock; 2127 } 2128 2129 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { 2130 ret = hdrv->raw_event(hid, report, data, size); 2131 if (ret < 0) 2132 goto unlock; 2133 } 2134 2135 ret = hid_report_raw_event(hid, type, data, size, interrupt); 2136 2137 unlock: 2138 if (!lock_already_taken) 2139 up(&hid->driver_input_lock); 2140 return ret; 2141 } 2142 2143 /** 2144 * hid_input_report - report data from lower layer (usb, bt...) 2145 * 2146 * @hid: hid device 2147 * @type: HID report type (HID_*_REPORT) 2148 * @data: report contents 2149 * @size: size of data parameter 2150 * @interrupt: distinguish between interrupt and control transfers 2151 * 2152 * This is data entry for lower layers. 2153 */ 2154 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2155 int interrupt) 2156 { 2157 return __hid_input_report(hid, type, data, size, interrupt, 0, 2158 false, /* from_bpf */ 2159 false /* lock_already_taken */); 2160 } 2161 EXPORT_SYMBOL_GPL(hid_input_report); 2162 2163 bool hid_match_one_id(const struct hid_device *hdev, 2164 const struct hid_device_id *id) 2165 { 2166 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && 2167 (id->group == HID_GROUP_ANY || id->group == hdev->group) && 2168 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && 2169 (id->product == HID_ANY_ID || id->product == hdev->product); 2170 } 2171 2172 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, 2173 const struct hid_device_id *id) 2174 { 2175 for (; id->bus; id++) 2176 if (hid_match_one_id(hdev, id)) 2177 return id; 2178 2179 return NULL; 2180 } 2181 EXPORT_SYMBOL_GPL(hid_match_id); 2182 2183 static const struct hid_device_id hid_hiddev_list[] = { 2184 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) }, 2185 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) }, 2186 { } 2187 }; 2188 2189 static bool hid_hiddev(struct hid_device *hdev) 2190 { 2191 return !!hid_match_id(hdev, hid_hiddev_list); 2192 } 2193 2194 2195 static ssize_t 2196 report_descriptor_read(struct file *filp, struct kobject *kobj, 2197 const struct bin_attribute *attr, 2198 char *buf, loff_t off, size_t count) 2199 { 2200 struct device *dev = kobj_to_dev(kobj); 2201 struct hid_device *hdev = to_hid_device(dev); 2202 2203 if (off >= hdev->rsize) 2204 return 0; 2205 2206 if (off + count > hdev->rsize) 2207 count = hdev->rsize - off; 2208 2209 memcpy(buf, hdev->rdesc + off, count); 2210 2211 return count; 2212 } 2213 2214 static ssize_t 2215 country_show(struct device *dev, struct device_attribute *attr, 2216 char *buf) 2217 { 2218 struct hid_device *hdev = to_hid_device(dev); 2219 2220 return sprintf(buf, "%02x\n", hdev->country & 0xff); 2221 } 2222 2223 static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE); 2224 2225 static const DEVICE_ATTR_RO(country); 2226 2227 int hid_connect(struct hid_device *hdev, unsigned int connect_mask) 2228 { 2229 static const char *types[] = { "Device", "Pointer", "Mouse", "Device", 2230 "Joystick", "Gamepad", "Keyboard", "Keypad", 2231 "Multi-Axis Controller" 2232 }; 2233 const char *type, *bus; 2234 char buf[64] = ""; 2235 unsigned int i; 2236 int len; 2237 int ret; 2238 2239 ret = 
hid_bpf_connect_device(hdev); 2240 if (ret) 2241 return ret; 2242 2243 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) 2244 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); 2245 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) 2246 connect_mask |= HID_CONNECT_HIDINPUT_FORCE; 2247 if (hdev->bus != BUS_USB) 2248 connect_mask &= ~HID_CONNECT_HIDDEV; 2249 if (hid_hiddev(hdev)) 2250 connect_mask |= HID_CONNECT_HIDDEV_FORCE; 2251 2252 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev, 2253 connect_mask & HID_CONNECT_HIDINPUT_FORCE)) 2254 hdev->claimed |= HID_CLAIMED_INPUT; 2255 2256 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect && 2257 !hdev->hiddev_connect(hdev, 2258 connect_mask & HID_CONNECT_HIDDEV_FORCE)) 2259 hdev->claimed |= HID_CLAIMED_HIDDEV; 2260 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) 2261 hdev->claimed |= HID_CLAIMED_HIDRAW; 2262 2263 if (connect_mask & HID_CONNECT_DRIVER) 2264 hdev->claimed |= HID_CLAIMED_DRIVER; 2265 2266 /* Drivers with the ->raw_event callback set are not required to connect 2267 * to any other listener. */ 2268 if (!hdev->claimed && !hdev->driver->raw_event) { 2269 hid_err(hdev, "device has no listeners, quitting\n"); 2270 return -ENODEV; 2271 } 2272 2273 hid_process_ordering(hdev); 2274 2275 if ((hdev->claimed & HID_CLAIMED_INPUT) && 2276 (connect_mask & HID_CONNECT_FF) && hdev->ff_init) 2277 hdev->ff_init(hdev); 2278 2279 len = 0; 2280 if (hdev->claimed & HID_CLAIMED_INPUT) 2281 len += sprintf(buf + len, "input"); 2282 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2283 len += sprintf(buf + len, "%shiddev%d", len ? "," : "", 2284 ((struct hiddev *)hdev->hiddev)->minor); 2285 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2286 len += sprintf(buf + len, "%shidraw%d", len ?
"," : "", 2287 ((struct hidraw *)hdev->hidraw)->minor); 2288 2289 type = "Device"; 2290 for (i = 0; i < hdev->maxcollection; i++) { 2291 struct hid_collection *col = &hdev->collection[i]; 2292 if (col->type == HID_COLLECTION_APPLICATION && 2293 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK && 2294 (col->usage & 0xffff) < ARRAY_SIZE(types)) { 2295 type = types[col->usage & 0xffff]; 2296 break; 2297 } 2298 } 2299 2300 switch (hdev->bus) { 2301 case BUS_USB: 2302 bus = "USB"; 2303 break; 2304 case BUS_BLUETOOTH: 2305 bus = "BLUETOOTH"; 2306 break; 2307 case BUS_I2C: 2308 bus = "I2C"; 2309 break; 2310 case BUS_SDW: 2311 bus = "SOUNDWIRE"; 2312 break; 2313 case BUS_VIRTUAL: 2314 bus = "VIRTUAL"; 2315 break; 2316 case BUS_INTEL_ISHTP: 2317 case BUS_AMD_SFH: 2318 bus = "SENSOR HUB"; 2319 break; 2320 default: 2321 bus = "<UNKNOWN>"; 2322 } 2323 2324 ret = device_create_file(&hdev->dev, &dev_attr_country); 2325 if (ret) 2326 hid_warn(hdev, 2327 "can't create sysfs country code attribute err: %d\n", ret); 2328 2329 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", 2330 buf, bus, hdev->version >> 8, hdev->version & 0xff, 2331 type, hdev->name, hdev->phys); 2332 2333 return 0; 2334 } 2335 EXPORT_SYMBOL_GPL(hid_connect); 2336 2337 void hid_disconnect(struct hid_device *hdev) 2338 { 2339 device_remove_file(&hdev->dev, &dev_attr_country); 2340 if (hdev->claimed & HID_CLAIMED_INPUT) 2341 hidinput_disconnect(hdev); 2342 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2343 hdev->hiddev_disconnect(hdev); 2344 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2345 hidraw_disconnect(hdev); 2346 hdev->claimed = 0; 2347 2348 hid_bpf_disconnect_device(hdev); 2349 } 2350 EXPORT_SYMBOL_GPL(hid_disconnect); 2351 2352 /** 2353 * hid_hw_start - start underlying HW 2354 * @hdev: hid device 2355 * @connect_mask: which outputs to connect, see HID_CONNECT_* 2356 * 2357 * Call this in the probe function *after* hid_parse. This will set up HW 2358 * buffers and start the device (if not deferred to device open). 2359 * hid_hw_stop must be called if this was successful. 2360 */ 2361 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask) 2362 { 2363 int error; 2364 2365 error = hdev->ll_driver->start(hdev); 2366 if (error) 2367 return error; 2368 2369 if (connect_mask) { 2370 error = hid_connect(hdev, connect_mask); 2371 if (error) { 2372 hdev->ll_driver->stop(hdev); 2373 return error; 2374 } 2375 } 2376 2377 return 0; 2378 } 2379 EXPORT_SYMBOL_GPL(hid_hw_start); 2380 2381 /** 2382 * hid_hw_stop - stop underlying HW 2383 * @hdev: hid device 2384 * 2385 * This is usually called from the remove function or from probe when 2386 * something failed and hid_hw_start was already called. 2387 */ 2388 void hid_hw_stop(struct hid_device *hdev) 2389 { 2390 hid_disconnect(hdev); 2391 hdev->ll_driver->stop(hdev); 2392 } 2393 EXPORT_SYMBOL_GPL(hid_hw_stop); 2394 2395 /** 2396 * hid_hw_open - signal underlying HW to start delivering events 2397 * @hdev: hid device 2398 * 2399 * Tell underlying HW to start delivering events from the device. 2400 * This function should be called sometime after successful call 2401 * to hid_hw_start().
2402 */ 2403 int hid_hw_open(struct hid_device *hdev) 2404 { 2405 int ret; 2406 2407 ret = mutex_lock_killable(&hdev->ll_open_lock); 2408 if (ret) 2409 return ret; 2410 2411 if (!hdev->ll_open_count++) { 2412 ret = hdev->ll_driver->open(hdev); 2413 if (ret) 2414 hdev->ll_open_count--; 2415 2416 if (hdev->driver->on_hid_hw_open) 2417 hdev->driver->on_hid_hw_open(hdev); 2418 } 2419 2420 mutex_unlock(&hdev->ll_open_lock); 2421 return ret; 2422 } 2423 EXPORT_SYMBOL_GPL(hid_hw_open); 2424 2425 /** 2426 * hid_hw_close - signal underlying HW to stop delivering events 2427 * 2428 * @hdev: hid device 2429 * 2430 * This function indicates that we are not interested in the events 2431 * from this device anymore. Delivery of events may or may not stop, 2432 * depending on the number of users still outstanding. 2433 */ 2434 void hid_hw_close(struct hid_device *hdev) 2435 { 2436 mutex_lock(&hdev->ll_open_lock); 2437 if (!--hdev->ll_open_count) { 2438 hdev->ll_driver->close(hdev); 2439 2440 if (hdev->driver->on_hid_hw_close) 2441 hdev->driver->on_hid_hw_close(hdev); 2442 } 2443 mutex_unlock(&hdev->ll_open_lock); 2444 } 2445 EXPORT_SYMBOL_GPL(hid_hw_close); 2446 2447 /** 2448 * hid_hw_request - send report request to device 2449 * 2450 * @hdev: hid device 2451 * @report: report to send 2452 * @reqtype: hid request type 2453 */ 2454 void hid_hw_request(struct hid_device *hdev, 2455 struct hid_report *report, enum hid_class_request reqtype) 2456 { 2457 if (hdev->ll_driver->request) 2458 return hdev->ll_driver->request(hdev, report, reqtype); 2459 2460 __hid_request(hdev, report, reqtype); 2461 } 2462 EXPORT_SYMBOL_GPL(hid_hw_request); 2463 2464 int __hid_hw_raw_request(struct hid_device *hdev, 2465 unsigned char reportnum, __u8 *buf, 2466 size_t len, enum hid_report_type rtype, 2467 enum hid_class_request reqtype, 2468 u64 source, bool from_bpf) 2469 { 2470 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; 2471 int ret; 2472 2473 if (hdev->ll_driver->max_buffer_size) 2474 max_buffer_size = hdev->ll_driver->max_buffer_size; 2475 2476 if (len < 1 || len > max_buffer_size || !buf) 2477 return -EINVAL; 2478 2479 ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype, 2480 reqtype, source, from_bpf); 2481 if (ret) 2482 return ret; 2483 2484 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len, 2485 rtype, reqtype); 2486 } 2487 2488 /** 2489 * hid_hw_raw_request - send report request to device 2490 * 2491 * @hdev: hid device 2492 * @reportnum: report ID 2493 * @buf: in/out data to transfer 2494 * @len: length of buf 2495 * @rtype: HID report type 2496 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT 2497 * 2498 * Return: count of data transferred, negative if error 2499 * 2500 * Same behavior as hid_hw_request, but with raw buffers instead.
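 *
 * Hedged usage sketch, fetching a feature report; the report number 0x02
 * and the 16-byte length are illustrative, not values from this file:
 *
 *	u8 *buf = kzalloc(16, GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_raw_request(hdev, 0x02, buf, 16,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	if (ret < 0)
 *		hid_err(hdev, "feature report failed: %d\n", ret);
 *	kfree(buf);
 *
 * The buffer should be heap-allocated as above; most transports need a
 * DMA-capable buffer, so stack memory must not be passed here.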
2501 */ 2502 int hid_hw_raw_request(struct hid_device *hdev, 2503 unsigned char reportnum, __u8 *buf, 2504 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype) 2505 { 2506 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false); 2507 } 2508 EXPORT_SYMBOL_GPL(hid_hw_raw_request); 2509 2510 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source, 2511 bool from_bpf) 2512 { 2513 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; 2514 int ret; 2515 2516 if (hdev->ll_driver->max_buffer_size) 2517 max_buffer_size = hdev->ll_driver->max_buffer_size; 2518 2519 if (len < 1 || len > max_buffer_size || !buf) 2520 return -EINVAL; 2521 2522 ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf); 2523 if (ret) 2524 return ret; 2525 2526 if (hdev->ll_driver->output_report) 2527 return hdev->ll_driver->output_report(hdev, buf, len); 2528 2529 return -ENOSYS; 2530 } 2531 2532 /** 2533 * hid_hw_output_report - send output report to device 2534 * 2535 * @hdev: hid device 2536 * @buf: raw data to transfer 2537 * @len: length of buf 2538 * 2539 * Return: count of data transferred, negative if error 2540 */ 2541 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) 2542 { 2543 return __hid_hw_output_report(hdev, buf, len, 0, false); 2544 } 2545 EXPORT_SYMBOL_GPL(hid_hw_output_report); 2546 2547 #ifdef CONFIG_PM 2548 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) 2549 { 2550 if (hdev->driver && hdev->driver->suspend) 2551 return hdev->driver->suspend(hdev, state); 2552 2553 return 0; 2554 } 2555 EXPORT_SYMBOL_GPL(hid_driver_suspend); 2556 2557 int hid_driver_reset_resume(struct hid_device *hdev) 2558 { 2559 if (hdev->driver && hdev->driver->reset_resume) 2560 return hdev->driver->reset_resume(hdev); 2561 2562 return 0; 2563 } 2564 EXPORT_SYMBOL_GPL(hid_driver_reset_resume); 2565 2566 int hid_driver_resume(struct hid_device *hdev) 2567 { 2568 if (hdev->driver && hdev->driver->resume) 2569 return hdev->driver->resume(hdev); 2570 2571 return 0; 2572 } 2573 EXPORT_SYMBOL_GPL(hid_driver_resume); 2574 #endif /* CONFIG_PM */ 2575 2576 struct hid_dynid { 2577 struct list_head list; 2578 struct hid_device_id id; 2579 }; 2580 2581 /** 2582 * new_id_store - add a new HID device ID to this driver and re-probe devices 2583 * @drv: target device driver 2584 * @buf: buffer for scanning device ID data 2585 * @count: input size 2586 * 2587 * Adds a new dynamic hid device ID to this driver, 2588 * and causes the driver to probe for all devices again. 2589 */ 2590 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 2591 size_t count) 2592 { 2593 struct hid_driver *hdrv = to_hid_driver(drv); 2594 struct hid_dynid *dynid; 2595 __u32 bus, vendor, product; 2596 unsigned long driver_data = 0; 2597 int ret; 2598 2599 ret = sscanf(buf, "%x %x %x %lx", 2600 &bus, &vendor, &product, &driver_data); 2601 if (ret < 3) 2602 return -EINVAL; 2603 2604 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 2605 if (!dynid) 2606 return -ENOMEM; 2607 2608 dynid->id.bus = bus; 2609 dynid->id.group = HID_GROUP_ANY; 2610 dynid->id.vendor = vendor; 2611 dynid->id.product = product; 2612 dynid->id.driver_data = driver_data; 2613 2614 spin_lock(&hdrv->dyn_lock); 2615 list_add_tail(&dynid->list, &hdrv->dyn_list); 2616 spin_unlock(&hdrv->dyn_lock); 2617 2618 ret = driver_attach(&hdrv->driver); 2619 2620 return ret ? 
: count; 2621 } 2622 static DRIVER_ATTR_WO(new_id); 2623 2624 static struct attribute *hid_drv_attrs[] = { 2625 &driver_attr_new_id.attr, 2626 NULL, 2627 }; 2628 ATTRIBUTE_GROUPS(hid_drv); 2629 2630 static void hid_free_dynids(struct hid_driver *hdrv) 2631 { 2632 struct hid_dynid *dynid, *n; 2633 2634 spin_lock(&hdrv->dyn_lock); 2635 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { 2636 list_del(&dynid->list); 2637 kfree(dynid); 2638 } 2639 spin_unlock(&hdrv->dyn_lock); 2640 } 2641 2642 const struct hid_device_id *hid_match_device(struct hid_device *hdev, 2643 struct hid_driver *hdrv) 2644 { 2645 struct hid_dynid *dynid; 2646 2647 spin_lock(&hdrv->dyn_lock); 2648 list_for_each_entry(dynid, &hdrv->dyn_list, list) { 2649 if (hid_match_one_id(hdev, &dynid->id)) { 2650 spin_unlock(&hdrv->dyn_lock); 2651 return &dynid->id; 2652 } 2653 } 2654 spin_unlock(&hdrv->dyn_lock); 2655 2656 return hid_match_id(hdev, hdrv->id_table); 2657 } 2658 EXPORT_SYMBOL_GPL(hid_match_device); 2659 2660 static int hid_bus_match(struct device *dev, const struct device_driver *drv) 2661 { 2662 struct hid_driver *hdrv = to_hid_driver(drv); 2663 struct hid_device *hdev = to_hid_device(dev); 2664 2665 return hid_match_device(hdev, hdrv) != NULL; 2666 } 2667 2668 /** 2669 * hid_compare_device_paths - check if both devices share the same path 2670 * @hdev_a: hid device 2671 * @hdev_b: hid device 2672 * @separator: char to use as separator 2673 * 2674 * Check if two devices share the same path up to the last occurrence of 2675 * the separator char. Both paths must exist (i.e., zero-length paths 2676 * don't match). 2677 */ 2678 bool hid_compare_device_paths(struct hid_device *hdev_a, 2679 struct hid_device *hdev_b, char separator) 2680 { 2681 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys; 2682 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys; 2683 2684 if (n1 != n2 || n1 <= 0 || n2 <= 0) 2685 return false; 2686 2687 return !strncmp(hdev_a->phys, hdev_b->phys, n1); 2688 } 2689 EXPORT_SYMBOL_GPL(hid_compare_device_paths); 2690 2691 static bool hid_check_device_match(struct hid_device *hdev, 2692 struct hid_driver *hdrv, 2693 const struct hid_device_id **id) 2694 { 2695 *id = hid_match_device(hdev, hdrv); 2696 if (!*id) 2697 return false; 2698 2699 if (hdrv->match) 2700 return hdrv->match(hdev, hid_ignore_special_drivers); 2701 2702 /* 2703 * hid-generic implements .match(), so we must be dealing with a 2704 * different HID driver here, and can simply check if 2705 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER 2706 * are set or not. 
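 * For example, when hid_ignore_special_drivers is set, every such
 * device is grouped as HID_GROUP_GENERIC in hid_add_device() and ends
 * up handled by hid-generic rather than by a specialized driver.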
2707 */ 2708 return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER); 2709 } 2710 2711 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv) 2712 { 2713 const struct hid_device_id *id; 2714 int ret; 2715 2716 if (!hdev->bpf_rsize) { 2717 /* in case a bpf program gets detached, we need to free the old one */ 2718 hid_free_bpf_rdesc(hdev); 2719 2720 /* keep this around so we know we called it once */ 2721 hdev->bpf_rsize = hdev->dev_rsize; 2722 2723 /* call_hid_bpf_rdesc_fixup will always return a valid pointer */ 2724 hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc, 2725 &hdev->bpf_rsize); 2726 } 2727 2728 if (!hid_check_device_match(hdev, hdrv, &id)) 2729 return -ENODEV; 2730 2731 hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL); 2732 if (!hdev->devres_group_id) 2733 return -ENOMEM; 2734 2735 /* reset the quirks that have been previously set */ 2736 hdev->quirks = hid_lookup_quirk(hdev); 2737 hdev->driver = hdrv; 2738 2739 if (hdrv->probe) { 2740 ret = hdrv->probe(hdev, id); 2741 } else { /* default probe */ 2742 ret = hid_open_report(hdev); 2743 if (!ret) 2744 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 2745 } 2746 2747 /* 2748 * Note that we are not closing the devres group opened above so 2749 * even resources that were attached to the device after probe is 2750 * run are released when hid_device_remove() is executed. This is 2751 * needed as some drivers would allocate additional resources, 2752 * for example when updating firmware. 2753 */ 2754 2755 if (ret) { 2756 devres_release_group(&hdev->dev, hdev->devres_group_id); 2757 hid_close_report(hdev); 2758 hdev->driver = NULL; 2759 } 2760 2761 return ret; 2762 } 2763 2764 static int hid_device_probe(struct device *dev) 2765 { 2766 struct hid_device *hdev = to_hid_device(dev); 2767 struct hid_driver *hdrv = to_hid_driver(dev->driver); 2768 int ret = 0; 2769 2770 if (down_interruptible(&hdev->driver_input_lock)) 2771 return -EINTR; 2772 2773 hdev->io_started = false; 2774 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); 2775 2776 if (!hdev->driver) 2777 ret = __hid_device_probe(hdev, hdrv); 2778 2779 if (!hdev->io_started) 2780 up(&hdev->driver_input_lock); 2781 2782 return ret; 2783 } 2784 2785 static void hid_device_remove(struct device *dev) 2786 { 2787 struct hid_device *hdev = to_hid_device(dev); 2788 struct hid_driver *hdrv; 2789 2790 down(&hdev->driver_input_lock); 2791 hdev->io_started = false; 2792 2793 hdrv = hdev->driver; 2794 if (hdrv) { 2795 if (hdrv->remove) 2796 hdrv->remove(hdev); 2797 else /* default remove */ 2798 hid_hw_stop(hdev); 2799 2800 /* Release all devres resources allocated by the driver */ 2801 devres_release_group(&hdev->dev, hdev->devres_group_id); 2802 2803 hid_close_report(hdev); 2804 hdev->driver = NULL; 2805 } 2806 2807 if (!hdev->io_started) 2808 up(&hdev->driver_input_lock); 2809 } 2810 2811 static ssize_t modalias_show(struct device *dev, struct device_attribute *a, 2812 char *buf) 2813 { 2814 struct hid_device *hdev = container_of(dev, struct hid_device, dev); 2815 2816 return sysfs_emit(buf, "hid:b%04Xg%04Xv%08Xp%08X\n", 2817 hdev->bus, hdev->group, hdev->vendor, hdev->product); 2818 } 2819 static DEVICE_ATTR_RO(modalias); 2820 2821 static struct attribute *hid_dev_attrs[] = { 2822 &dev_attr_modalias.attr, 2823 NULL, 2824 }; 2825 static const struct bin_attribute *hid_dev_bin_attrs[] = { 2826 &bin_attr_report_descriptor, 2827 NULL 2828 }; 2829 static const struct attribute_group hid_dev_group = {
2830 .attrs = hid_dev_attrs, 2831 .bin_attrs = hid_dev_bin_attrs, 2832 }; 2833 __ATTRIBUTE_GROUPS(hid_dev); 2834 2835 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env) 2836 { 2837 const struct hid_device *hdev = to_hid_device(dev); 2838 2839 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X", 2840 hdev->bus, hdev->vendor, hdev->product)) 2841 return -ENOMEM; 2842 2843 if (add_uevent_var(env, "HID_NAME=%s", hdev->name)) 2844 return -ENOMEM; 2845 2846 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys)) 2847 return -ENOMEM; 2848 2849 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq)) 2850 return -ENOMEM; 2851 2852 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X", 2853 hdev->bus, hdev->group, hdev->vendor, hdev->product)) 2854 return -ENOMEM; 2855 2856 return 0; 2857 } 2858 2859 const struct bus_type hid_bus_type = { 2860 .name = "hid", 2861 .dev_groups = hid_dev_groups, 2862 .drv_groups = hid_drv_groups, 2863 .match = hid_bus_match, 2864 .probe = hid_device_probe, 2865 .remove = hid_device_remove, 2866 .uevent = hid_uevent, 2867 }; 2868 EXPORT_SYMBOL(hid_bus_type); 2869 2870 int hid_add_device(struct hid_device *hdev) 2871 { 2872 static atomic_t id = ATOMIC_INIT(0); 2873 int ret; 2874 2875 if (WARN_ON(hdev->status & HID_STAT_ADDED)) 2876 return -EBUSY; 2877 2878 hdev->quirks = hid_lookup_quirk(hdev); 2879 2880 /* we need to kill them here, otherwise they will stay allocated 2881 * waiting for a matching driver */ 2882 if (hid_ignore(hdev)) 2883 return -ENODEV; 2884 2885 /* 2886 * Check for the mandatory transport channel. 2887 */ 2888 if (!hdev->ll_driver->raw_request) { 2889 hid_err(hdev, "transport driver missing .raw_request()\n"); 2890 return -EINVAL; 2891 } 2892 2893 /* 2894 * Read the device report descriptor once and use as template 2895 * for the driver-specific modifications. 2896 */ 2897 ret = hdev->ll_driver->parse(hdev); 2898 if (ret) 2899 return ret; 2900 if (!hdev->dev_rdesc) 2901 return -ENODEV; 2902 2903 /* 2904 * Scan generic devices for group information 2905 */ 2906 if (hid_ignore_special_drivers) { 2907 hdev->group = HID_GROUP_GENERIC; 2908 } else if (!hdev->group && 2909 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) { 2910 ret = hid_scan_report(hdev); 2911 if (ret) 2912 hid_warn(hdev, "bad device descriptor (%d)\n", ret); 2913 } 2914 2915 hdev->id = atomic_inc_return(&id); 2916 2917 /* XXX hack, any other cleaner solution after the driver core 2918 * is converted to allow more than 20 bytes as the device name? */ 2919 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, 2920 hdev->vendor, hdev->product, hdev->id); 2921 2922 hid_debug_register(hdev, dev_name(&hdev->dev)); 2923 ret = device_add(&hdev->dev); 2924 if (!ret) 2925 hdev->status |= HID_STAT_ADDED; 2926 else 2927 hid_debug_unregister(hdev); 2928 2929 return ret; 2930 } 2931 EXPORT_SYMBOL_GPL(hid_add_device); 2932 2933 /** 2934 * hid_allocate_device - allocate new hid device descriptor 2935 * 2936 * Allocate and initialize hid device, so that hid_destroy_device might be 2937 * used to free it. 2938 * 2939 * A new hid_device pointer is returned on success, otherwise an ERR_PTR 2940 * encoded error value.
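 *
 * Sketch of the typical transport-driver life cycle built on top of this
 * helper (error unwinding trimmed; example_ll_driver is an assumed name,
 * not something defined in this file):
 *
 *	struct hid_device *hdev = hid_allocate_device();
 *
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	hdev->ll_driver = &example_ll_driver;
 *	hdev->bus = BUS_VIRTUAL;
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);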
2941 */ 2942 struct hid_device *hid_allocate_device(void) 2943 { 2944 struct hid_device *hdev; 2945 int ret = -ENOMEM; 2946 2947 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); 2948 if (hdev == NULL) 2949 return ERR_PTR(ret); 2950 2951 device_initialize(&hdev->dev); 2952 hdev->dev.release = hid_device_release; 2953 hdev->dev.bus = &hid_bus_type; 2954 device_enable_async_suspend(&hdev->dev); 2955 2956 hid_close_report(hdev); 2957 2958 init_waitqueue_head(&hdev->debug_wait); 2959 INIT_LIST_HEAD(&hdev->debug_list); 2960 spin_lock_init(&hdev->debug_list_lock); 2961 sema_init(&hdev->driver_input_lock, 1); 2962 mutex_init(&hdev->ll_open_lock); 2963 kref_init(&hdev->ref); 2964 2965 ret = hid_bpf_device_init(hdev); 2966 if (ret) 2967 goto out_err; 2968 2969 return hdev; 2970 2971 out_err: 2972 hid_destroy_device(hdev); 2973 return ERR_PTR(ret); 2974 } 2975 EXPORT_SYMBOL_GPL(hid_allocate_device); 2976 2977 static void hid_remove_device(struct hid_device *hdev) 2978 { 2979 if (hdev->status & HID_STAT_ADDED) { 2980 device_del(&hdev->dev); 2981 hid_debug_unregister(hdev); 2982 hdev->status &= ~HID_STAT_ADDED; 2983 } 2984 hid_free_bpf_rdesc(hdev); 2985 kfree(hdev->dev_rdesc); 2986 hdev->dev_rdesc = NULL; 2987 hdev->dev_rsize = 0; 2988 hdev->bpf_rsize = 0; 2989 } 2990 2991 /** 2992 * hid_destroy_device - free previously allocated device 2993 * 2994 * @hdev: hid device 2995 * 2996 * If you allocate hid_device through hid_allocate_device, you should only 2997 * ever free it with this function. 2998 */ 2999 void hid_destroy_device(struct hid_device *hdev) 3000 { 3001 hid_bpf_destroy_device(hdev); 3002 hid_remove_device(hdev); 3003 put_device(&hdev->dev); 3004 } 3005 EXPORT_SYMBOL_GPL(hid_destroy_device); 3006 3007 3008 static int __hid_bus_reprobe_drivers(struct device *dev, void *data) 3009 { 3010 struct hid_driver *hdrv = data; 3011 struct hid_device *hdev = to_hid_device(dev); 3012 3013 if (hdev->driver == hdrv && 3014 !hdrv->match(hdev, hid_ignore_special_drivers) && 3015 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) 3016 return device_reprobe(dev); 3017 3018 return 0; 3019 } 3020 3021 static int __hid_bus_driver_added(struct device_driver *drv, void *data) 3022 { 3023 struct hid_driver *hdrv = to_hid_driver(drv); 3024 3025 if (hdrv->match) { 3026 bus_for_each_dev(&hid_bus_type, NULL, hdrv, 3027 __hid_bus_reprobe_drivers); 3028 } 3029 3030 return 0; 3031 } 3032 3033 static int __bus_removed_driver(struct device_driver *drv, void *data) 3034 { 3035 return bus_rescan_devices(&hid_bus_type); 3036 } 3037 3038 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner, 3039 const char *mod_name) 3040 { 3041 int ret; 3042 3043 hdrv->driver.name = hdrv->name; 3044 hdrv->driver.bus = &hid_bus_type; 3045 hdrv->driver.owner = owner; 3046 hdrv->driver.mod_name = mod_name; 3047 3048 INIT_LIST_HEAD(&hdrv->dyn_list); 3049 spin_lock_init(&hdrv->dyn_lock); 3050 3051 ret = driver_register(&hdrv->driver); 3052 3053 if (ret == 0) 3054 bus_for_each_drv(&hid_bus_type, NULL, NULL, 3055 __hid_bus_driver_added); 3056 3057 return ret; 3058 } 3059 EXPORT_SYMBOL_GPL(__hid_register_driver); 3060 3061 void hid_unregister_driver(struct hid_driver *hdrv) 3062 { 3063 driver_unregister(&hdrv->driver); 3064 hid_free_dynids(hdrv); 3065 3066 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver); 3067 } 3068 EXPORT_SYMBOL_GPL(hid_unregister_driver); 3069 3070 int hid_check_keys_pressed(struct hid_device *hid) 3071 { 3072 struct hid_input *hidinput; 3073 int i; 3074 3075 if (!(hid->claimed & HID_CLAIMED_INPUT)) 3076
return 0; 3077 3078 list_for_each_entry(hidinput, &hid->inputs, list) { 3079 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) 3080 if (hidinput->input->key[i]) 3081 return 1; 3082 } 3083 3084 return 0; 3085 } 3086 EXPORT_SYMBOL_GPL(hid_check_keys_pressed); 3087 3088 #ifdef CONFIG_HID_BPF 3089 static const struct hid_ops __hid_ops = { 3090 .hid_get_report = hid_get_report, 3091 .hid_hw_raw_request = __hid_hw_raw_request, 3092 .hid_hw_output_report = __hid_hw_output_report, 3093 .hid_input_report = __hid_input_report, 3094 .owner = THIS_MODULE, 3095 .bus_type = &hid_bus_type, 3096 }; 3097 #endif 3098 3099 static int __init hid_init(void) 3100 { 3101 int ret; 3102 3103 ret = bus_register(&hid_bus_type); 3104 if (ret) { 3105 pr_err("can't register hid bus\n"); 3106 goto err; 3107 } 3108 3109 #ifdef CONFIG_HID_BPF 3110 hid_ops = &__hid_ops; 3111 #endif 3112 3113 ret = hidraw_init(); 3114 if (ret) 3115 goto err_bus; 3116 3117 hid_debug_init(); 3118 3119 return 0; 3120 err_bus: 3121 bus_unregister(&hid_bus_type); 3122 err: 3123 return ret; 3124 } 3125 3126 static void __exit hid_exit(void) 3127 { 3128 #ifdef CONFIG_HID_BPF 3129 hid_ops = NULL; 3130 #endif 3131 hid_debug_exit(); 3132 hidraw_exit(); 3133 bus_unregister(&hid_bus_type); 3134 hid_quirks_exit(HID_BUS_ANY); 3135 } 3136 3137 module_init(hid_init); 3138 module_exit(hid_exit); 3139 3140 MODULE_AUTHOR("Andreas Gal"); 3141 MODULE_AUTHOR("Vojtech Pavlik"); 3142 MODULE_AUTHOR("Jiri Kosina"); 3143 MODULE_DESCRIPTION("HID support for Linux"); 3144 MODULE_LICENSE("GPL"); 3145