// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID support for Linux
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2012 Jiri Kosina
 */

/*
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/input.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>

#include <linux/hid.h>
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>

#include "hid-ids.h"

/*
 * Version Information
 */

#define DRIVER_DESC "HID core driver"

static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");

/*
 * Register a new report for a device.
 */

struct hid_report *hid_register_report(struct hid_device *device,
				       enum hid_report_type type, unsigned int id,
				       unsigned int application)
{
	struct hid_report_enum *report_enum = device->report_enum + type;
	struct hid_report *report;

	if (id >= HID_MAX_IDS)
		return NULL;
	if (report_enum->report_id_hash[id])
		return report_enum->report_id_hash[id];

	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
	if (!report)
		return NULL;

	if (id != 0)
		report_enum->numbered = 1;

	report->id = id;
	report->type = type;
	report->size = 0;
	report->device = device;
	report->application = application;
	report_enum->report_id_hash[id] = report;

	list_add_tail(&report->list, &report_enum->report_list);
	INIT_LIST_HEAD(&report->field_entry_list);

	return report;
}
EXPORT_SYMBOL_GPL(hid_register_report);

/*
 * Register a new field for this report.
 */

static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
	struct hid_field *field;

	if (report->maxfield == HID_MAX_FIELDS) {
		hid_err(report->device, "too many fields in report\n");
		return NULL;
	}

	field = kvzalloc((sizeof(struct hid_field) +
			  usages * sizeof(struct hid_usage) +
			  3 * usages * sizeof(unsigned int)), GFP_KERNEL);
	if (!field)
		return NULL;

	field->index = report->maxfield++;
	report->field[field->index] = field;
	field->usage = (struct hid_usage *)(field + 1);
	field->value = (s32 *)(field->usage + usages);
	field->new_value = (s32 *)(field->value + usages);
	field->usages_priorities = (s32 *)(field->new_value + usages);
	field->report = report;

	return field;
}
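
/*
 * Note on hid_register_field() above: the single kvzalloc() lays out the
 * per-field arrays directly behind struct hid_field, i.e.
 *
 *   [struct hid_field][usage[0..usages-1]][value[]][new_value[]][usages_priorities[]]
 *
 * which is why hid_free_report() later in this file only needs one
 * kvfree() per field.
 */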

/*
 * Open a collection. The type/usage is pushed on the stack.
 */

static int open_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection;
	unsigned usage;
	int collection_index;

	usage = parser->local.usage[0];

	if (parser->collection_stack_ptr == parser->collection_stack_size) {
		unsigned int *collection_stack;
		unsigned int new_size = parser->collection_stack_size +
					HID_COLLECTION_STACK_SIZE;

		collection_stack = krealloc(parser->collection_stack,
					    new_size * sizeof(unsigned int),
					    GFP_KERNEL);
		if (!collection_stack)
			return -ENOMEM;

		parser->collection_stack = collection_stack;
		parser->collection_stack_size = new_size;
	}

	if (parser->device->maxcollection == parser->device->collection_size) {
		collection = kmalloc(
				array3_size(sizeof(struct hid_collection),
					    parser->device->collection_size,
					    2),
				GFP_KERNEL);
		if (collection == NULL) {
			hid_err(parser->device, "failed to reallocate collection array\n");
			return -ENOMEM;
		}
		memcpy(collection, parser->device->collection,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		memset(collection + parser->device->collection_size, 0,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		kfree(parser->device->collection);
		parser->device->collection = collection;
		parser->device->collection_size *= 2;
	}

	parser->collection_stack[parser->collection_stack_ptr++] =
		parser->device->maxcollection;

	collection_index = parser->device->maxcollection++;
	collection = parser->device->collection + collection_index;
	collection->type = type;
	collection->usage = usage;
	collection->level = parser->collection_stack_ptr - 1;
	collection->parent_idx = (collection->level == 0) ? -1 :
		parser->collection_stack[collection->level - 1];

	if (type == HID_COLLECTION_APPLICATION)
		parser->device->maxapplication++;

	return 0;
}

/*
 * Close a collection.
 */

static int close_collection(struct hid_parser *parser)
{
	if (!parser->collection_stack_ptr) {
		hid_err(parser->device, "collection stack underflow\n");
		return -EINVAL;
	}
	parser->collection_stack_ptr--;
	return 0;
}

/*
 * Climb up the stack, search for the specified collection type
 * and return the usage.
 */

static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection = parser->device->collection;
	int n;

	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
		unsigned index = parser->collection_stack[n];
		if (collection[index].type == type)
			return collection[index].usage;
	}
	return 0; /* we know nothing about this usage type */
}

/*
 * Concatenate usage which defines 16 bits or less with the
 * currently defined usage page to form a 32 bit usage
 */

static void complete_usage(struct hid_parser *parser, unsigned int index)
{
	parser->local.usage[index] &= 0xFFFF;
	parser->local.usage[index] |=
		(parser->global.usage_page & 0xFFFF) << 16;
}

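/*
 * Example: with Usage Page 0x0001 (Generic Desktop) in effect, a short
 * Usage item 0x30 (X) is completed by complete_usage() above to the
 * 32-bit usage 0x00010030.
 */
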
/*
 * Add a usage to the temporary parser table.
 */

static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
	if (parser->local.usage_index >= HID_MAX_USAGES) {
		hid_err(parser->device, "usage index exceeded\n");
		return -1;
	}
	parser->local.usage[parser->local.usage_index] = usage;

	/*
	 * If Usage item only includes usage id, concatenate it with
	 * currently defined usage page
	 */
	if (size <= 2)
		complete_usage(parser, parser->local.usage_index);

	parser->local.usage_size[parser->local.usage_index] = size;
	parser->local.collection_index[parser->local.usage_index] =
		parser->collection_stack_ptr ?
		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
	parser->local.usage_index++;
	return 0;
}

/*
 * Register a new field for this report.
 */

static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
{
	struct hid_report *report;
	struct hid_field *field;
	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
	unsigned int usages;
	unsigned int offset;
	unsigned int i;
	unsigned int application;

	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);

	report = hid_register_report(parser->device, report_type,
				     parser->global.report_id, application);
	if (!report) {
		hid_err(parser->device, "hid_register_report failed\n");
		return -1;
	}

	/* Handle both signed and unsigned cases properly */
	if ((parser->global.logical_minimum < 0 &&
	     parser->global.logical_maximum <
	     parser->global.logical_minimum) ||
	    (parser->global.logical_minimum >= 0 &&
	     (__u32)parser->global.logical_maximum <
	     (__u32)parser->global.logical_minimum)) {
		dbg_hid("logical range invalid 0x%x 0x%x\n",
			parser->global.logical_minimum,
			parser->global.logical_maximum);
		return -1;
	}

	offset = report->size;
	report->size += parser->global.report_size * parser->global.report_count;

	if (parser->device->ll_driver->max_buffer_size)
		max_buffer_size = parser->device->ll_driver->max_buffer_size;

	/* Total size check: Allow for possible report index byte */
	if (report->size > (max_buffer_size - 1) << 3) {
		hid_err(parser->device, "report is too long\n");
		return -1;
	}

	if (!parser->local.usage_index) /* Ignore padding fields */
		return 0;

	usages = max_t(unsigned, parser->local.usage_index,
		       parser->global.report_count);

	field = hid_register_field(report, usages);
	if (!field)
		return 0;

	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
	field->application = application;

	for (i = 0; i < usages; i++) {
		unsigned j = i;
		/* Duplicate the last usage we parsed if we have excess values */
		if (i >= parser->local.usage_index)
			j = parser->local.usage_index - 1;
		field->usage[i].hid = parser->local.usage[j];
		field->usage[i].collection_index =
			parser->local.collection_index[j];
		field->usage[i].usage_index = i;
		field->usage[i].resolution_multiplier = 1;
	}

	field->maxusage = usages;
	field->flags = flags;
	field->report_offset = offset;
	field->report_type = report_type;
	field->report_size = parser->global.report_size;
	field->report_count = parser->global.report_count;
	field->logical_minimum = parser->global.logical_minimum;
	field->logical_maximum = parser->global.logical_maximum;
	field->physical_minimum = parser->global.physical_minimum;
	field->physical_maximum = parser->global.physical_maximum;
	field->unit_exponent = parser->global.unit_exponent;
	field->unit = parser->global.unit;

	return 0;
}
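
/*
 * Example of the size bookkeeping in hid_add_field(): a typical mouse
 * report with 3 one-bit buttons, 5 bits of constant padding and two 8-bit
 * axes ends up with report->size = 3 + 5 + 16 = 24 bits, i.e. 3 bytes on
 * the wire (plus one leading byte if a Report ID is used).
 */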

/*
 * Read data value from item.
 */

static u32 item_udata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.u8;
	case 2: return item->data.u16;
	case 4: return item->data.u32;
	}
	return 0;
}

static s32 item_sdata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.s8;
	case 2: return item->data.s16;
	case 4: return item->data.s32;
	}
	return 0;
}

/*
 * Process a global item.
 */

static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
	__s32 raw_value;
	switch (item->tag) {
	case HID_GLOBAL_ITEM_TAG_PUSH:

		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
			hid_err(parser->device, "global environment stack overflow\n");
			return -1;
		}

		memcpy(parser->global_stack + parser->global_stack_ptr++,
		       &parser->global, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_POP:

		if (!parser->global_stack_ptr) {
			hid_err(parser->device, "global environment stack underflow\n");
			return -1;
		}

		memcpy(&parser->global, parser->global_stack +
		       --parser->global_stack_ptr, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
		parser->global.usage_page = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
		parser->global.logical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
		if (parser->global.logical_minimum < 0)
			parser->global.logical_maximum = item_sdata(item);
		else
			parser->global.logical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
		parser->global.physical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
		if (parser->global.physical_minimum < 0)
			parser->global.physical_maximum = item_sdata(item);
		else
			parser->global.physical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
		/* Many devices provide unit exponent as a two's complement
		 * nibble due to the common misunderstanding of HID
		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
		 * both this and the standard encoding. */
		raw_value = item_sdata(item);
		if (!(raw_value & 0xfffffff0))
			parser->global.unit_exponent = hid_snto32(raw_value, 4);
		else
			parser->global.unit_exponent = raw_value;
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT:
		parser->global.unit = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
		parser->global.report_size = item_udata(item);
		if (parser->global.report_size > 256) {
			hid_err(parser->device, "invalid report_size %d\n",
				parser->global.report_size);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
		parser->global.report_count = item_udata(item);
		if (parser->global.report_count > HID_MAX_USAGES) {
			hid_err(parser->device, "invalid report_count %d\n",
				parser->global.report_count);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
		parser->global.report_id = item_udata(item);
		if (parser->global.report_id == 0 ||
		    parser->global.report_id >= HID_MAX_IDS) {
			hid_err(parser->device, "report_id %u is invalid\n",
				parser->global.report_id);
			return -1;
		}
		return 0;

	default:
		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
		return -1;
	}
}
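
/*
 * Unit exponent example for the UNIT_EXPONENT case above: a device that
 * encodes the exponent as a bare two's complement nibble sends 0x0C for
 * 10^-4, and hid_snto32(0x0C, 4) yields -4. Values with bits set outside
 * the low nibble are taken as-is.
 */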

/*
 * Process a local item.
 */

static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	unsigned n;
	__u32 count;

	data = item_udata(item);

	switch (item->tag) {
	case HID_LOCAL_ITEM_TAG_DELIMITER:

		if (data) {
			/*
			 * We treat items before the first delimiter
			 * as global to all usage sets (branch 0).
			 * For now we process only these global
			 * items and the first delimiter set.
			 */
			if (parser->local.delimiter_depth != 0) {
				hid_err(parser->device, "nested delimiters\n");
				return -1;
			}
			parser->local.delimiter_depth++;
			parser->local.delimiter_branch++;
		} else {
			if (parser->local.delimiter_depth < 1) {
				hid_err(parser->device, "bogus close delimiter\n");
				return -1;
			}
			parser->local.delimiter_depth--;
		}
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		return hid_add_usage(parser, data, item->size);

	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		parser->local.usage_minimum = data;
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		count = data - parser->local.usage_minimum;
		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
			/*
			 * We do not warn if the name is not set, we are
			 * actually pre-scanning the device.
			 */
			if (dev_name(&parser->device->dev))
				hid_warn(parser->device,
					 "ignoring exceeding usage max\n");
			data = HID_MAX_USAGES - parser->local.usage_index +
				parser->local.usage_minimum - 1;
			if (data <= 0) {
				hid_err(parser->device,
					"no more usage index available\n");
				return -1;
			}
		}

		for (n = parser->local.usage_minimum; n <= data; n++)
			if (hid_add_usage(parser, n, item->size)) {
				dbg_hid("hid_add_usage failed\n");
				return -1;
			}
		return 0;

	default:

		dbg_hid("unknown local item tag 0x%x\n", item->tag);
		return 0;
	}
	return 0;
}

/*
 * Concatenate Usage Pages into Usages where relevant:
 * As per specification, 6.2.2.8: "When the parser encounters a main item it
 * concatenates the last declared Usage Page with a Usage to form a complete
 * usage value."
 */

static void hid_concatenate_last_usage_page(struct hid_parser *parser)
{
	int i;
	unsigned int usage_page;
	unsigned int current_page;

	if (!parser->local.usage_index)
		return;

	usage_page = parser->global.usage_page;

	/*
	 * Concatenate usage page again only if last declared Usage Page
	 * has not been already used in previous usages concatenation
	 */
	for (i = parser->local.usage_index - 1; i >= 0; i--) {
		if (parser->local.usage_size[i] > 2)
			/* Ignore extended usages */
			continue;

		current_page = parser->local.usage[i] >> 16;
		if (current_page == usage_page)
			break;

		complete_usage(parser, i);
	}
}

/*
 * Process a main item.
 */

static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int ret;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		ret = open_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		ret = close_collection(parser);
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
		break;
	default:
		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
		ret = 0;
	}

	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */

	return ret;
}

/*
 * Process a reserved item.
 */

static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
{
	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
	return 0;
}

/*
 * Free a report and all registered fields. The field->usage and
 * field->value tables are allocated behind the field, so we need
 * only to free(field) itself.
 */

static void hid_free_report(struct hid_report *report)
{
	unsigned n;

	kfree(report->field_entries);

	for (n = 0; n < report->maxfield; n++)
		kvfree(report->field[n]);
	kfree(report);
}

/*
 * Close report. This function returns the device
 * state to the point prior to hid_open_report().
 */
static void hid_close_report(struct hid_device *device)
{
	unsigned i, j;

	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = device->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];
			if (report)
				hid_free_report(report);
		}
		memset(report_enum, 0, sizeof(*report_enum));
		INIT_LIST_HEAD(&report_enum->report_list);
	}

	/*
	 * If the HID driver had a report_fixup() callback, dev->rdesc
	 * will be allocated by hid-core and needs to be freed.
	 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
	 * which cases it'll be freed later on device removal or destroy.
	 */
	if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
		kfree(device->rdesc);
	device->rdesc = NULL;
	device->rsize = 0;

	kfree(device->collection);
	device->collection = NULL;
	device->collection_size = 0;
	device->maxcollection = 0;
	device->maxapplication = 0;

	device->status &= ~HID_STAT_PARSED;
}

static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
{
	/* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
	if (hdev->bpf_rdesc != hdev->dev_rdesc)
		kfree(hdev->bpf_rdesc);
	hdev->bpf_rdesc = NULL;
}

/*
 * Free a device structure, all reports, and all fields.
 */

void hiddev_free(struct kref *ref)
{
	struct hid_device *hid = container_of(ref, struct hid_device, ref);

	hid_close_report(hid);
	hid_free_bpf_rdesc(hid);
	kfree(hid->dev_rdesc);
	kfree(hid);
}

static void hid_device_release(struct device *dev)
{
	struct hid_device *hid = to_hid_device(dev);

	kref_put(&hid->ref, hiddev_free);
}

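/*
 * Short item encoding, for reference when reading fetch_item() below: the
 * prefix byte packs the size (bits 0-1), type (bits 2-3) and tag (bits 4-7).
 * For example, 0x05 0x01 is "Usage Page (Generic Desktop)" and 0x75 0x08 is
 * "Report Size (8)".
 */
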
/*
 * Fetch a report description item from the data stream. We support long
 * items, though they are not used yet.
 */

static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
{
	u8 b;

	if ((end - start) <= 0)
		return NULL;

	b = *start++;

	item->type = (b >> 2) & 3;
	item->tag  = (b >> 4) & 15;

	if (item->tag == HID_ITEM_TAG_LONG) {

		item->format = HID_ITEM_FORMAT_LONG;

		if ((end - start) < 2)
			return NULL;

		item->size = *start++;
		item->tag  = *start++;

		if ((end - start) < item->size)
			return NULL;

		item->data.longdata = start;
		start += item->size;
		return start;
	}

	item->format = HID_ITEM_FORMAT_SHORT;
	item->size = b & 3;

	switch (item->size) {
	case 0:
		return start;

	case 1:
		if ((end - start) < 1)
			return NULL;
		item->data.u8 = *start++;
		return start;

	case 2:
		if ((end - start) < 2)
			return NULL;
		item->data.u16 = get_unaligned_le16(start);
		start = (__u8 *)((__le16 *)start + 1);
		return start;

	case 3:
		item->size++;
		if ((end - start) < 4)
			return NULL;
		item->data.u32 = get_unaligned_le32(start);
		start = (__u8 *)((__le32 *)start + 1);
		return start;
	}

	return NULL;
}

static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
{
	struct hid_device *hid = parser->device;

	if (usage == HID_DG_CONTACTID)
		hid->group = HID_GROUP_MULTITOUCH;
}

static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
{
	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;

	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}

static void hid_scan_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_device *hid = parser->device;
	int i;

	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
	    (type == HID_COLLECTION_PHYSICAL ||
	     type == HID_COLLECTION_APPLICATION))
		hid->group = HID_GROUP_SENSOR_HUB;

	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
	    hid->group == HID_GROUP_MULTITOUCH)
		hid->group = HID_GROUP_GENERIC;

	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] == HID_GD_POINTER)
				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;

	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;

	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] ==
			    (HID_UP_GOOGLEVENDOR | 0x0001))
				parser->device->group =
					HID_GROUP_VIVALDI;
}

static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int i;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		hid_scan_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		/* ignore constant inputs, they will be ignored by hid-input */
		if (data & HID_MAIN_ITEM_CONSTANT)
			break;
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_input_usage(parser, parser->local.usage[i]);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_feature_usage(parser, parser->local.usage[i]);
		break;
	}

	/* Reset the local parser environment */
	memset(&parser->local, 0, sizeof(parser->local));

	return 0;
}

/*
 * Scan a report descriptor before the device is added to the bus.
 * Sets device groups and other properties that determine what driver
 * to load.
 */
static int hid_scan_report(struct hid_device *hid)
{
	struct hid_parser *parser;
	struct hid_item item;
	const __u8 *start = hid->dev_rdesc;
	const __u8 *end = start + hid->dev_rsize;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_scan_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser)
		return -ENOMEM;

	parser->device = hid;
	hid->group = HID_GROUP_GENERIC;

	/*
	 * The parsing is simpler than the one in hid_open_report() as we should
	 * be robust against hid errors. Those errors will be raised by
	 * hid_open_report() anyway.
	 */
	while ((start = fetch_item(start, end, &item)) != NULL)
		dispatch_type[item.type](parser, &item);

	/*
	 * Handle special flags set during scanning.
	 */
	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
	    (hid->group == HID_GROUP_MULTITOUCH))
		hid->group = HID_GROUP_MULTITOUCH_WIN_8;

	/*
	 * Vendor specific handling
	 */
	switch (hid->vendor) {
	case USB_VENDOR_ID_WACOM:
		hid->group = HID_GROUP_WACOM;
		break;
	case USB_VENDOR_ID_SYNAPTICS:
		if (hid->group == HID_GROUP_GENERIC)
			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
				/*
				 * hid-rmi should take care of them,
				 * not hid-generic
				 */
				hid->group = HID_GROUP_RMI;
		break;
	}

	kfree(parser->collection_stack);
	vfree(parser);
	return 0;
}

/**
 * hid_parse_report - parse device report
 *
 * @hid: hid device
 * @start: report start
 * @size: report size
 *
 * Allocate the device report as read by the bus driver. This function should
 * only be called from parse() in ll drivers.
 */
int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
{
	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
	if (!hid->dev_rdesc)
		return -ENOMEM;
	hid->dev_rsize = size;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_parse_report);

static const char * const hid_report_names[] = {
	"HID_INPUT_REPORT",
	"HID_OUTPUT_REPORT",
	"HID_FEATURE_REPORT",
};
/**
 * hid_validate_values - validate existing device report's value indexes
 *
 * @hid: hid device
 * @type: which report type to examine
 * @id: which report ID to examine (0 for first)
 * @field_index: which report field to examine
 * @report_counts: expected number of values
 *
 * Validate the number of values in a given field of a given report, after
 * parsing.
 */
struct hid_report *hid_validate_values(struct hid_device *hid,
				       enum hid_report_type type, unsigned int id,
				       unsigned int field_index,
				       unsigned int report_counts)
{
	struct hid_report *report;

	if (type > HID_FEATURE_REPORT) {
		hid_err(hid, "invalid HID report type %u\n", type);
		return NULL;
	}

	if (id >= HID_MAX_IDS) {
		hid_err(hid, "invalid HID report id %u\n", id);
		return NULL;
	}

	/*
	 * Explicitly not using hid_get_report() here since it depends on
	 * ->numbered being checked, which may not always be the case when
	 * drivers go to access report values.
	 */
	if (id == 0) {
		/*
		 * Validating on id 0 means we should examine the first
		 * report in the list.
		 */
		report = list_first_entry_or_null(
				&hid->report_enum[type].report_list,
				struct hid_report, list);
	} else {
		report = hid->report_enum[type].report_id_hash[id];
	}
	if (!report) {
		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
		return NULL;
	}
	if (report->maxfield <= field_index) {
		hid_err(hid, "not enough fields in %s %u\n",
			hid_report_names[type], id);
		return NULL;
	}
	if (report->field[field_index]->report_count < report_counts) {
		hid_err(hid, "not enough values in %s %u field %u\n",
			hid_report_names[type], id, field_index);
		return NULL;
	}
	return report;
}
EXPORT_SYMBOL_GPL(hid_validate_values);

static int hid_calculate_multiplier(struct hid_device *hid,
				    struct hid_field *multiplier)
{
	int m;
	__s32 v = *multiplier->value;
	__s32 lmin = multiplier->logical_minimum;
	__s32 lmax = multiplier->logical_maximum;
	__s32 pmin = multiplier->physical_minimum;
	__s32 pmax = multiplier->physical_maximum;

	/*
	 * "Because OS implementations will generally divide the control's
	 * reported count by the Effective Resolution Multiplier, designers
	 * should take care not to establish a potential Effective
	 * Resolution Multiplier of zero."
	 * HID Usage Table, v1.12, Section 4.3.1, p31
	 */
	if (lmax - lmin == 0)
		return 1;
	/*
	 * Handling the unit exponent is left as an exercise to whoever
	 * finds a device where that exponent is not 0.
	 */
	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
	if (unlikely(multiplier->unit_exponent != 0)) {
		hid_warn(hid,
			 "unsupported Resolution Multiplier unit exponent %d\n",
			 multiplier->unit_exponent);
	}

	/* There are no devices with an effective multiplier > 255 */
	if (unlikely(m == 0 || m > 255 || m < -255)) {
		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
		m = 1;
	}

	return m;
}
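
/*
 * Worked example for hid_calculate_multiplier(): a wheel that declares
 * Logical Minimum 0, Logical Maximum 1, Physical Minimum 1, Physical
 * Maximum 120 and reports a multiplier value of 1 yields
 * m = (1 - 0)/(1 - 0) * (120 - 1) + 1 = 120.
 */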

static void hid_apply_multiplier_to_field(struct hid_device *hid,
					  struct hid_field *field,
					  struct hid_collection *multiplier_collection,
					  int effective_multiplier)
{
	struct hid_collection *collection;
	struct hid_usage *usage;
	int i;

	/*
	 * If multiplier_collection is NULL, the multiplier applies
	 * to all fields in the report.
	 * Otherwise, it is the Logical Collection the multiplier applies to
	 * but our field may be in a subcollection of that collection.
	 */
	for (i = 0; i < field->maxusage; i++) {
		usage = &field->usage[i];

		collection = &hid->collection[usage->collection_index];
		while (collection->parent_idx != -1 &&
		       collection != multiplier_collection)
			collection = &hid->collection[collection->parent_idx];

		if (collection->parent_idx != -1 ||
		    multiplier_collection == NULL)
			usage->resolution_multiplier = effective_multiplier;

	}
}

static void hid_apply_multiplier(struct hid_device *hid,
				 struct hid_field *multiplier)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_field *field;
	struct hid_collection *multiplier_collection;
	int effective_multiplier;
	int i;

	/*
	 * "The Resolution Multiplier control must be contained in the same
	 * Logical Collection as the control(s) to which it is to be applied.
	 * If no Resolution Multiplier is defined, then the Resolution
	 * Multiplier defaults to 1. If more than one control exists in a
	 * Logical Collection, the Resolution Multiplier is associated with
	 * all controls in the collection. If no Logical Collection is
	 * defined, the Resolution Multiplier is associated with all
	 * controls in the report."
	 * HID Usage Table, v1.12, Section 4.3.1, p30
	 *
	 * Thus, search from the current collection upwards until we find a
	 * logical collection. Then search all fields for that same parent
	 * collection. Those are the fields the multiplier applies to.
	 *
	 * If we have more than one multiplier, it will overwrite the
	 * applicable fields later.
	 */
	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
	while (multiplier_collection->parent_idx != -1 &&
	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];

	effective_multiplier = hid_calculate_multiplier(hid, multiplier);

	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			field = rep->field[i];
			hid_apply_multiplier_to_field(hid, field,
						      multiplier_collection,
						      effective_multiplier);
		}
	}
}

/*
 * hid_setup_resolution_multiplier - set up all resolution multipliers
 *
 * @device: hid device
 *
 * Search for all Resolution Multiplier Feature Reports and apply their
 * value to all matching Input items. This only updates the internal struct
 * fields.
 *
 * The Resolution Multiplier is applied by the hardware. If the multiplier
 * is anything other than 1, the hardware will send pre-multiplied events
 * so that the same physical interaction generates an accumulated
 *	accumulated_value = value * multiplier
 * This may be achieved by sending
 *  - "value * multiplier" for each event, or
 *  - "value" but "multiplier" times as frequently, or
 *  - a combination of the above
 * The only guarantee is that the same physical interaction always generates
 * an accumulated 'value * multiplier'.
 *
 * This function must be called before any event processing and after
 * any SetRequest to the Resolution Multiplier.
 */
void hid_setup_resolution_multiplier(struct hid_device *hid)
{
	struct hid_report_enum *rep_enum;
	struct hid_report *rep;
	struct hid_usage *usage;
	int i, j;

	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
	list_for_each_entry(rep, &rep_enum->report_list, list) {
		for (i = 0; i < rep->maxfield; i++) {
			/* Ignore if report count is out of bounds. */
			if (rep->field[i]->report_count < 1)
				continue;

			for (j = 0; j < rep->field[i]->maxusage; j++) {
				usage = &rep->field[i]->usage[j];
				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
					hid_apply_multiplier(hid,
							     rep->field[i]);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);

/**
 * hid_open_report - open a driver-specific device report
 *
 * @device: hid device
 *
 * Parse a report description into a hid_device structure. Reports are
 * enumerated, fields are attached to these reports.
 * 0 returned on success, otherwise nonzero error value.
 *
 * This function (or the equivalent hid_parse() macro) should only be
 * called from probe() in drivers, before starting the device.
 */
int hid_open_report(struct hid_device *device)
{
	struct hid_parser *parser;
	struct hid_item item;
	unsigned int size;
	const __u8 *start;
	const __u8 *end;
	const __u8 *next;
	int ret;
	int i;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_parser_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	if (WARN_ON(device->status & HID_STAT_PARSED))
		return -EBUSY;

	start = device->bpf_rdesc;
	if (WARN_ON(!start))
		return -ENODEV;
	size = device->bpf_rsize;

	if (device->driver->report_fixup) {
		/*
		 * device->driver->report_fixup() needs to work
		 * on a copy of our report descriptor so it can
		 * change it.
		 */
		__u8 *buf = kmemdup(start, size, GFP_KERNEL);

		if (buf == NULL)
			return -ENOMEM;

		start = device->driver->report_fixup(device, buf, &size);

		/*
		 * The second kmemdup is required in case report_fixup() returns
		 * a static read-only memory, but we have no idea if that memory
		 * needs to be cleaned up or not at the end.
		 */
		start = kmemdup(start, size, GFP_KERNEL);
		kfree(buf);
		if (start == NULL)
			return -ENOMEM;
	}

	device->rdesc = start;
	device->rsize = size;

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	parser->device = device;

	end = start + size;

	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
				     sizeof(struct hid_collection), GFP_KERNEL);
	if (!device->collection) {
		ret = -ENOMEM;
		goto err;
	}
	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
		device->collection[i].parent_idx = -1;

	ret = -EINVAL;
	while ((next = fetch_item(start, end, &item)) != NULL) {
		start = next;

		if (item.format != HID_ITEM_FORMAT_SHORT) {
			hid_err(device, "unexpected long global item\n");
			goto err;
		}

		if (dispatch_type[item.type](parser, &item)) {
			hid_err(device, "item %u %u %u %u parsing failed\n",
				item.format, (unsigned)item.size,
				(unsigned)item.type, (unsigned)item.tag);
			goto err;
		}

		if (start == end) {
			if (parser->collection_stack_ptr) {
				hid_err(device, "unbalanced collection at end of report description\n");
				goto err;
			}
			if (parser->local.delimiter_depth) {
				hid_err(device, "unbalanced delimiter at end of report description\n");
				goto err;
			}

			/*
			 * fetch initial values in case the device's
			 * default multiplier isn't the recommended 1
			 */
			hid_setup_resolution_multiplier(device);

			kfree(parser->collection_stack);
			vfree(parser);
			device->status |= HID_STAT_PARSED;

			return 0;
		}
	}

	hid_err(device, "item fetching failed at offset %u/%u\n",
		size - (unsigned int)(end - start), size);
err:
	kfree(parser->collection_stack);
alloc_err:
	vfree(parser);
	hid_close_report(device);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_open_report);

/*
 * Convert a signed n-bit integer to signed 32-bit integer. Common
 * cases are done through the compiler, the screwed things have to be
 * done by hand.
 */

static s32 snto32(__u32 value, unsigned n)
{
	if (!value || !n)
		return 0;

	if (n > 32)
		n = 32;

	switch (n) {
	case 8:  return ((__s8)value);
	case 16: return ((__s16)value);
	case 32: return ((__s32)value);
	}
	return value & (1 << (n - 1)) ? value | (~0U << n) : value;
}

s32 hid_snto32(__u32 value, unsigned n)
{
	return snto32(value, n);
}
EXPORT_SYMBOL_GPL(hid_snto32);

/*
 * Convert a signed 32-bit integer to a signed n-bit integer.
 */

static u32 s32ton(__s32 value, unsigned n)
{
	s32 a = value >> (n - 1);
	if (a && a != -1)
		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
	return value & ((1 << n) - 1);
}

/*
 * Extract/implement a data field from/to a little endian report (bit array).
 *
 * Code sort-of follows HID spec:
 * http://www.usb.org/developers/hidpage/HID1_11.pdf
 *
 * While the USB HID spec allows unlimited length bit fields in "report
 * descriptors", most devices never use more than 16 bits.
 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
 */
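
/*
 * Example: extracting an 8-bit field at bit offset 4 with __extract() below
 * reads report[0] and report[1] and returns
 * ((report[0] >> 4) | (report[1] << 4)) & 0xff.
 */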

static u32 __extract(u8 *report, unsigned offset, int n)
{
	unsigned int idx = offset / 8;
	unsigned int bit_nr = 0;
	unsigned int bit_shift = offset % 8;
	int bits_to_copy = 8 - bit_shift;
	u32 value = 0;
	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;

	while (n > 0) {
		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
		n -= bits_to_copy;
		bit_nr += bits_to_copy;
		bits_to_copy = 8;
		bit_shift = 0;
		idx++;
	}

	return value & mask;
}

u32 hid_field_extract(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n)
{
	if (n > 32) {
		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
			      __func__, n, current->comm);
		n = 32;
	}

	return __extract(report, offset, n);
}
EXPORT_SYMBOL_GPL(hid_field_extract);

/*
 * "implement" : set bits in a little endian bit stream.
 * Same concepts as "extract" (see comments above).
 * The data mangled in the bit stream remains in little endian
 * order the whole time. It makes more sense to talk about
 * endianness of register values by considering a register
 * a "cached" copy of the little endian bit stream.
 */

static void __implement(u8 *report, unsigned offset, int n, u32 value)
{
	unsigned int idx = offset / 8;
	unsigned int bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;

	while (n - bits_to_set >= 0) {
		report[idx] &= ~(0xff << bit_shift);
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_shift = 0;
		idx++;
	}

	/* last nibble */
	if (n) {
		u8 bit_mask = ((1U << n) - 1);
		report[idx] &= ~(bit_mask << bit_shift);
		report[idx] |= value << bit_shift;
	}
}

static void implement(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n, u32 value)
{
	if (unlikely(n > 32)) {
		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
			 __func__, n, current->comm);
		n = 32;
	} else if (n < 32) {
		u32 m = (1U << n) - 1;

		if (unlikely(value > m)) {
			hid_warn(hid,
				 "%s() called with too large value %d (n: %d)! (%s)\n",
				 __func__, value, n, current->comm);
			value &= m;
		}
	}

	__implement(report, offset, n, value);
}

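/*
 * Conversely, __implement() above writing an 8-bit value at bit offset 4
 * updates the upper nibble of report[0] and the lower nibble of report[1].
 * When hid_output_field() below emits signed values it clamps them first
 * with s32ton(), e.g. s32ton(300, 8) == 0x7f and s32ton(-300, 8) == 0x80.
 */
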
/*
 * Search an array for a value.
 */

static int search(__s32 *array, __s32 value, unsigned n)
{
	while (n--) {
		if (*array++ == value)
			return 0;
	}
	return -1;
}

/**
 * hid_match_report - check if driver's raw_event should be called
 *
 * @hid: hid device
 * @report: hid report to match against
 *
 * compare hid->driver->report_table->report_type to report->type
 */
static int hid_match_report(struct hid_device *hid, struct hid_report *report)
{
	const struct hid_report_id *id = hid->driver->report_table;

	if (!id) /* NULL means all */
		return 1;

	for (; id->report_type != HID_TERMINATOR; id++)
		if (id->report_type == HID_ANY_ID ||
		    id->report_type == report->type)
			return 1;
	return 0;
}

/**
 * hid_match_usage - check if driver's event should be called
 *
 * @hid: hid device
 * @usage: usage to match against
 *
 * compare hid->driver->usage_table->usage_{type,code} to
 * usage->usage_{type,code}
 */
static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
{
	const struct hid_usage_id *id = hid->driver->usage_table;

	if (!id) /* NULL means all */
		return 1;

	for (; id->usage_type != HID_ANY_ID - 1; id++)
		if ((id->usage_hid == HID_ANY_ID ||
		     id->usage_hid == usage->hid) &&
		    (id->usage_type == HID_ANY_ID ||
		     id->usage_type == usage->type) &&
		    (id->usage_code == HID_ANY_ID ||
		     id->usage_code == usage->code))
			return 1;
	return 0;
}

static void hid_process_event(struct hid_device *hid, struct hid_field *field,
			      struct hid_usage *usage, __s32 value, int interrupt)
{
	struct hid_driver *hdrv = hid->driver;
	int ret;

	if (!list_empty(&hid->debug_list))
		hid_dump_input(hid, usage, value);

	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
		ret = hdrv->event(hid, field, usage, value);
		if (ret != 0) {
			if (ret < 0)
				hid_err(hid, "%s's event failed with %d\n",
					hdrv->name, ret);
			return;
		}
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_hid_event(hid, field, usage, value);
	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
		hid->hiddev_hid_event(hid, field, usage, value);
}

/*
 * Checks if the given value is valid within this field
 */
static inline int hid_array_value_is_valid(struct hid_field *field,
					   __s32 value)
{
	__s32 min = field->logical_minimum;

	/*
	 * Value needs to be between logical min and max, and
	 * (value - min) is used as an index in the usage array.
	 * This array is of size field->maxusage
	 */
	return value >= min &&
	       value <= field->logical_maximum &&
	       value - min < field->maxusage;
}

/*
 * Fetch the field from the data. The field content is stored for next
 * report processing (we do differential reporting to the layer).
 */
static void hid_input_fetch_field(struct hid_device *hid,
				  struct hid_field *field,
				  __u8 *data)
{
	unsigned n;
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	__s32 min = field->logical_minimum;
	__s32 *value;

	value = field->new_value;
	memset(value, 0, count * sizeof(__s32));
	field->ignored = false;

	for (n = 0; n < count; n++) {

		value[n] = min < 0 ?
			snto32(hid_field_extract(hid, data, offset + n * size,
						 size), size) :
			hid_field_extract(hid, data, offset + n * size, size);

		/* Ignore report if ErrorRollOver */
		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
		    hid_array_value_is_valid(field, value[n]) &&
		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
			field->ignored = true;
			return;
		}
	}
}

/*
 * Process a received variable field.
 */

static void hid_input_var_field(struct hid_device *hid,
				struct hid_field *field,
				int interrupt)
{
	unsigned int count = field->report_count;
	__s32 *value = field->new_value;
	unsigned int n;

	for (n = 0; n < count; n++)
		hid_process_event(hid,
				  field,
				  &field->usage[n],
				  value[n],
				  interrupt);

	memcpy(field->value, value, count * sizeof(__s32));
}

/*
 * Process a received array field. The field content is stored for
 * next report processing (we do differential reporting to the layer).
 */

static void hid_input_array_field(struct hid_device *hid,
				  struct hid_field *field,
				  int interrupt)
{
	unsigned int n;
	unsigned int count = field->report_count;
	__s32 min = field->logical_minimum;
	__s32 *value;

	value = field->new_value;

	/* ErrorRollOver */
	if (field->ignored)
		return;

	for (n = 0; n < count; n++) {
		if (hid_array_value_is_valid(field, field->value[n]) &&
		    search(value, field->value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[field->value[n] - min],
					  0,
					  interrupt);

		if (hid_array_value_is_valid(field, value[n]) &&
		    search(field->value, value[n], count))
			hid_process_event(hid,
					  field,
					  &field->usage[value[n] - min],
					  1,
					  interrupt);
	}

	memcpy(field->value, value, count * sizeof(__s32));
}

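/*
 * Differential reporting example for hid_input_array_field(): if a keyboard
 * array field previously reported the values {4, 0, 0} (KeyA held) and now
 * reports {4, 5, 0} (KeyA + KeyB), only the usage for value 5 gets a
 * "pressed" event (value 1); a value present before but missing now gets a
 * "released" event (value 0).
 */
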
/*
 * Analyse a received report, and fetch the data from it. The field
 * content is stored for next report processing (we do differential
 * reporting to the layer).
 */
static void hid_process_report(struct hid_device *hid,
			       struct hid_report *report,
			       __u8 *data,
			       int interrupt)
{
	unsigned int a;
	struct hid_field_entry *entry;
	struct hid_field *field;

	/* first retrieve all incoming values in data */
	for (a = 0; a < report->maxfield; a++)
		hid_input_fetch_field(hid, report->field[a], data);

	if (!list_empty(&report->field_entry_list)) {
		/* INPUT_REPORT, we have a priority list of fields */
		list_for_each_entry(entry,
				    &report->field_entry_list,
				    list) {
			field = entry->field;

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_process_event(hid,
						  field,
						  &field->usage[entry->index],
						  field->new_value[entry->index],
						  interrupt);
			else
				hid_input_array_field(hid, field, interrupt);
		}

		/* we need to do the memcpy at the end for var items */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				memcpy(field->value, field->new_value,
				       field->report_count * sizeof(__s32));
		}
	} else {
		/* FEATURE_REPORT, regular processing */
		for (a = 0; a < report->maxfield; a++) {
			field = report->field[a];

			if (field->flags & HID_MAIN_ITEM_VARIABLE)
				hid_input_var_field(hid, field, interrupt);
			else
				hid_input_array_field(hid, field, interrupt);
		}
	}
}

/*
 * Insert a given usage_index in a field in the list
 * of processed usages in the report.
 *
 * The elements of lower priority score are processed
 * first.
 */
static void __hid_insert_field_entry(struct hid_device *hid,
				     struct hid_report *report,
				     struct hid_field_entry *entry,
				     struct hid_field *field,
				     unsigned int usage_index)
{
	struct hid_field_entry *next;

	entry->field = field;
	entry->index = usage_index;
	entry->priority = field->usages_priorities[usage_index];

	/* insert the element at the correct position */
	list_for_each_entry(next,
			    &report->field_entry_list,
			    list) {
		/*
		 * the priority of our element is strictly higher
		 * than the next one, insert it before
		 */
		if (entry->priority > next->priority) {
			list_add_tail(&entry->list, &next->list);
			return;
		}
	}

	/* lowest priority score: insert at the end */
	list_add_tail(&entry->list, &report->field_entry_list);
}

static void hid_report_process_ordering(struct hid_device *hid,
					struct hid_report *report)
{
	struct hid_field *field;
	struct hid_field_entry *entries;
	unsigned int a, u, usages;
	unsigned int count = 0;

	/* count the number of individual fields in the report */
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE)
			count += field->report_count;
		else
			count++;
	}

	/* allocate the memory to process the fields */
	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return;

	report->field_entries = entries;

	/*
	 * walk through all fields in the report and
	 * store them by priority order in report->field_entry_list
	 *
	 * - Var elements are individualized (field + usage_index)
	 * - Arrays are taken as one, we cannot choose an order for them
	 */
	usages = 0;
	for (a = 0; a < report->maxfield; a++) {
		field = report->field[a];

		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
			for (u = 0; u < field->report_count; u++) {
				__hid_insert_field_entry(hid, report,
							 &entries[usages],
							 field, u);
				usages++;
			}
		} else {
			__hid_insert_field_entry(hid, report, &entries[usages],
						 field, 0);
			usages++;
		}
	}
}

static void hid_process_ordering(struct hid_device *hid)
{
	struct hid_report *report;
	struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];

	list_for_each_entry(report, &report_enum->report_list, list)
		hid_report_process_ordering(hid, report);
}

/*
 * Output the field into the report.
 */

static void hid_output_field(const struct hid_device *hid,
			     struct hid_field *field, __u8 *data)
{
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	unsigned n;

	for (n = 0; n < count; n++) {
		if (field->logical_minimum < 0)	/* signed values */
			implement(hid, data, offset + n * size, size,
				  s32ton(field->value[n], size));
		else				/* unsigned values */
			implement(hid, data, offset + n * size, size,
				  field->value[n]);
	}
}

/*
 * Compute the size of a report.
 */
static size_t hid_compute_report_size(struct hid_report *report)
{
	if (report->size)
		return ((report->size - 1) >> 3) + 1;

	return 0;
}

/*
 * Create a report. 'data' has to be allocated using
 * hid_alloc_report_buf() so that it has proper size.
 */

void hid_output_report(struct hid_report *report, __u8 *data)
{
	unsigned n;

	if (report->id > 0)
		*data++ = report->id;

	memset(data, 0, hid_compute_report_size(report));
	for (n = 0; n < report->maxfield; n++)
		hid_output_field(report->device, report->field[n], data);
}
EXPORT_SYMBOL_GPL(hid_output_report);

/*
 * Allocator for buffer that is going to be passed to hid_output_report()
 */
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
{
	/*
	 * 7 extra bytes are necessary to achieve proper functionality
	 * of implement() working on 8 byte chunks
	 */

	u32 len = hid_report_len(report) + 7;

	return kmalloc(len, flags);
}
EXPORT_SYMBOL_GPL(hid_alloc_report_buf);

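/*
 * Typical driver-side use of the two helpers above (illustrative sketch,
 * error handling omitted): allocate with hid_alloc_report_buf(), serialize
 * with hid_output_report(), then hand the buffer to the transport, e.g.
 *
 *	buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *	hid_output_report(report, buf);
 *	hid_hw_raw_request(hdev, report->id, buf, hid_report_len(report),
 *			   report->type, HID_REQ_SET_REPORT);
 *	kfree(buf);
 *
 * or simply call hid_hw_request(hdev, report, HID_REQ_SET_REPORT), which
 * typically ends up in __hid_request() further below.
 */
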
/*
 * Set a field value. The report this field belongs to has to be
 * created and transferred to the device, to set this value in the
 * device.
 */

int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
	unsigned size;

	if (!field)
		return -1;

	size = field->report_size;

	hid_dump_input(field->report->device, field->usage + offset, value);

	if (offset >= field->report_count) {
		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
			offset, field->report_count);
		return -1;
	}
	if (field->logical_minimum < 0) {
		if (value != snto32(s32ton(value, size), size)) {
			hid_err(field->report->device, "value %d is out of range\n", value);
			return -1;
		}
	}
	field->value[offset] = value;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_set_field);

struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
				 unsigned int application, unsigned int usage)
{
	struct list_head *report_list = &hdev->report_enum[report_type].report_list;
	struct hid_report *report;
	int i, j;

	list_for_each_entry(report, report_list, list) {
		if (report->application != application)
			continue;

		for (i = 0; i < report->maxfield; i++) {
			struct hid_field *field = report->field[i];

			for (j = 0; j < field->maxusage; j++) {
				if (field->usage[j].hid == usage)
					return field;
			}
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(hid_find_field);

static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
					 const u8 *data)
{
	struct hid_report *report;
	unsigned int n = 0;	/* Normally report number is 0 */

	/* Device uses numbered reports, data[0] is report number */
	if (report_enum->numbered)
		n = *data;

	report = report_enum->report_id_hash[n];
	if (report == NULL)
		dbg_hid("undefined report_id %u received\n", n);

	return report;
}

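/*
 * Buffer layout note for the raw-event paths below: when a device uses
 * numbered reports, data[0] carries the report ID and the field bits start
 * at data[1]; for unnumbered devices the field bits start at data[0]. This
 * is why hid_report_raw_event() advances cdata/csize by one byte for
 * numbered reports.
 */
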
1984 */ 1985 int __hid_request(struct hid_device *hid, struct hid_report *report, 1986 enum hid_class_request reqtype) 1987 { 1988 char *buf; 1989 int ret; 1990 u32 len; 1991 1992 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1993 if (!buf) 1994 return -ENOMEM; 1995 1996 len = hid_report_len(report); 1997 1998 if (reqtype == HID_REQ_SET_REPORT) 1999 hid_output_report(report, buf); 2000 2001 ret = hid->ll_driver->raw_request(hid, report->id, buf, len, 2002 report->type, reqtype); 2003 if (ret < 0) { 2004 dbg_hid("unable to complete request: %d\n", ret); 2005 goto out; 2006 } 2007 2008 if (reqtype == HID_REQ_GET_REPORT) 2009 hid_input_report(hid, report->type, buf, ret, 0); 2010 2011 ret = 0; 2012 2013 out: 2014 kfree(buf); 2015 return ret; 2016 } 2017 EXPORT_SYMBOL_GPL(__hid_request); 2018 2019 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2020 int interrupt) 2021 { 2022 struct hid_report_enum *report_enum = hid->report_enum + type; 2023 struct hid_report *report; 2024 struct hid_driver *hdrv; 2025 int max_buffer_size = HID_MAX_BUFFER_SIZE; 2026 u32 rsize, csize = size; 2027 u8 *cdata = data; 2028 int ret = 0; 2029 2030 report = hid_get_report(report_enum, data); 2031 if (!report) 2032 goto out; 2033 2034 if (report_enum->numbered) { 2035 cdata++; 2036 csize--; 2037 } 2038 2039 rsize = hid_compute_report_size(report); 2040 2041 if (hid->ll_driver->max_buffer_size) 2042 max_buffer_size = hid->ll_driver->max_buffer_size; 2043 2044 if (report_enum->numbered && rsize >= max_buffer_size) 2045 rsize = max_buffer_size - 1; 2046 else if (rsize > max_buffer_size) 2047 rsize = max_buffer_size; 2048 2049 if (csize < rsize) { 2050 dbg_hid("report %d is too short, (%d < %d)\n", report->id, 2051 csize, rsize); 2052 memset(cdata + csize, 0, rsize - csize); 2053 } 2054 2055 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 2056 hid->hiddev_report_event(hid, report); 2057 if (hid->claimed & HID_CLAIMED_HIDRAW) { 2058 ret = hidraw_report_event(hid, data, size); 2059 if (ret) 2060 goto out; 2061 } 2062 2063 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 2064 hid_process_report(hid, report, cdata, interrupt); 2065 hdrv = hid->driver; 2066 if (hdrv && hdrv->report) 2067 hdrv->report(hid, report); 2068 } 2069 2070 if (hid->claimed & HID_CLAIMED_INPUT) 2071 hidinput_report_event(hid, report); 2072 out: 2073 return ret; 2074 } 2075 EXPORT_SYMBOL_GPL(hid_report_raw_event); 2076 2077 2078 static int __hid_input_report(struct hid_device *hid, enum hid_report_type type, 2079 u8 *data, u32 size, int interrupt, u64 source, bool from_bpf, 2080 bool lock_already_taken) 2081 { 2082 struct hid_report_enum *report_enum; 2083 struct hid_driver *hdrv; 2084 struct hid_report *report; 2085 int ret = 0; 2086 2087 if (!hid) 2088 return -ENODEV; 2089 2090 ret = down_trylock(&hid->driver_input_lock); 2091 if (lock_already_taken && !ret) { 2092 up(&hid->driver_input_lock); 2093 return -EINVAL; 2094 } else if (!lock_already_taken && ret) { 2095 return -EBUSY; 2096 } 2097 2098 if (!hid->driver) { 2099 ret = -ENODEV; 2100 goto unlock; 2101 } 2102 report_enum = hid->report_enum + type; 2103 hdrv = hid->driver; 2104 2105 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf); 2106 if (IS_ERR(data)) { 2107 ret = PTR_ERR(data); 2108 goto unlock; 2109 } 2110 2111 if (!size) { 2112 dbg_hid("empty report\n"); 2113 ret = -1; 2114 goto unlock; 2115 } 2116 2117 /* Avoid unnecessary overhead if debugfs is disabled */ 2118 if 
(!list_empty(&hid->debug_list)) 2119 hid_dump_report(hid, type, data, size); 2120 2121 report = hid_get_report(report_enum, data); 2122 2123 if (!report) { 2124 ret = -1; 2125 goto unlock; 2126 } 2127 2128 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { 2129 ret = hdrv->raw_event(hid, report, data, size); 2130 if (ret < 0) 2131 goto unlock; 2132 } 2133 2134 ret = hid_report_raw_event(hid, type, data, size, interrupt); 2135 2136 unlock: 2137 if (!lock_already_taken) 2138 up(&hid->driver_input_lock); 2139 return ret; 2140 } 2141 2142 /** 2143 * hid_input_report - report data from lower layer (usb, bt...) 2144 * 2145 * @hid: hid device 2146 * @type: HID report type (HID_*_REPORT) 2147 * @data: report contents 2148 * @size: size of data parameter 2149 * @interrupt: distinguish between interrupt and control transfers 2150 * 2151 * This is data entry for lower layers. 2152 */ 2153 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2154 int interrupt) 2155 { 2156 return __hid_input_report(hid, type, data, size, interrupt, 0, 2157 false, /* from_bpf */ 2158 false /* lock_already_taken */); 2159 } 2160 EXPORT_SYMBOL_GPL(hid_input_report); 2161 2162 bool hid_match_one_id(const struct hid_device *hdev, 2163 const struct hid_device_id *id) 2164 { 2165 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && 2166 (id->group == HID_GROUP_ANY || id->group == hdev->group) && 2167 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && 2168 (id->product == HID_ANY_ID || id->product == hdev->product); 2169 } 2170 2171 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, 2172 const struct hid_device_id *id) 2173 { 2174 for (; id->bus; id++) 2175 if (hid_match_one_id(hdev, id)) 2176 return id; 2177 2178 return NULL; 2179 } 2180 EXPORT_SYMBOL_GPL(hid_match_id); 2181 2182 static const struct hid_device_id hid_hiddev_list[] = { 2183 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) }, 2184 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) }, 2185 { } 2186 }; 2187 2188 static bool hid_hiddev(struct hid_device *hdev) 2189 { 2190 return !!hid_match_id(hdev, hid_hiddev_list); 2191 } 2192 2193 2194 static ssize_t 2195 read_report_descriptor(struct file *filp, struct kobject *kobj, 2196 struct bin_attribute *attr, 2197 char *buf, loff_t off, size_t count) 2198 { 2199 struct device *dev = kobj_to_dev(kobj); 2200 struct hid_device *hdev = to_hid_device(dev); 2201 2202 if (off >= hdev->rsize) 2203 return 0; 2204 2205 if (off + count > hdev->rsize) 2206 count = hdev->rsize - off; 2207 2208 memcpy(buf, hdev->rdesc + off, count); 2209 2210 return count; 2211 } 2212 2213 static ssize_t 2214 show_country(struct device *dev, struct device_attribute *attr, 2215 char *buf) 2216 { 2217 struct hid_device *hdev = to_hid_device(dev); 2218 2219 return sprintf(buf, "%02x\n", hdev->country & 0xff); 2220 } 2221 2222 static struct bin_attribute dev_bin_attr_report_desc = { 2223 .attr = { .name = "report_descriptor", .mode = 0444 }, 2224 .read = read_report_descriptor, 2225 .size = HID_MAX_DESCRIPTOR_SIZE, 2226 }; 2227 2228 static const struct device_attribute dev_attr_country = { 2229 .attr = { .name = "country", .mode = 0444 }, 2230 .show = show_country, 2231 }; 2232 2233 int hid_connect(struct hid_device *hdev, unsigned int connect_mask) 2234 { 2235 static const char *types[] = { "Device", "Pointer", "Mouse", "Device", 2236 "Joystick", "Gamepad", "Keyboard", "Keypad", 2237 "Multi-Axis Controller" 2238 }; 2239 const char *type, *bus; 
2240 char buf[64] = ""; 2241 unsigned int i; 2242 int len; 2243 int ret; 2244 2245 ret = hid_bpf_connect_device(hdev); 2246 if (ret) 2247 return ret; 2248 2249 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) 2250 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); 2251 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) 2252 connect_mask |= HID_CONNECT_HIDINPUT_FORCE; 2253 if (hdev->bus != BUS_USB) 2254 connect_mask &= ~HID_CONNECT_HIDDEV; 2255 if (hid_hiddev(hdev)) 2256 connect_mask |= HID_CONNECT_HIDDEV_FORCE; 2257 2258 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev, 2259 connect_mask & HID_CONNECT_HIDINPUT_FORCE)) 2260 hdev->claimed |= HID_CLAIMED_INPUT; 2261 2262 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect && 2263 !hdev->hiddev_connect(hdev, 2264 connect_mask & HID_CONNECT_HIDDEV_FORCE)) 2265 hdev->claimed |= HID_CLAIMED_HIDDEV; 2266 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) 2267 hdev->claimed |= HID_CLAIMED_HIDRAW; 2268 2269 if (connect_mask & HID_CONNECT_DRIVER) 2270 hdev->claimed |= HID_CLAIMED_DRIVER; 2271 2272 /* Drivers with the ->raw_event callback set are not required to connect 2273 * to any other listener. */ 2274 if (!hdev->claimed && !hdev->driver->raw_event) { 2275 hid_err(hdev, "device has no listeners, quitting\n"); 2276 return -ENODEV; 2277 } 2278 2279 hid_process_ordering(hdev); 2280 2281 if ((hdev->claimed & HID_CLAIMED_INPUT) && 2282 (connect_mask & HID_CONNECT_FF) && hdev->ff_init) 2283 hdev->ff_init(hdev); 2284 2285 len = 0; 2286 if (hdev->claimed & HID_CLAIMED_INPUT) 2287 len += sprintf(buf + len, "input"); 2288 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2289 len += sprintf(buf + len, "%shiddev%d", len ? "," : "", 2290 ((struct hiddev *)hdev->hiddev)->minor); 2291 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2292 len += sprintf(buf + len, "%shidraw%d", len ? 
"," : "", 2293 ((struct hidraw *)hdev->hidraw)->minor); 2294 2295 type = "Device"; 2296 for (i = 0; i < hdev->maxcollection; i++) { 2297 struct hid_collection *col = &hdev->collection[i]; 2298 if (col->type == HID_COLLECTION_APPLICATION && 2299 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK && 2300 (col->usage & 0xffff) < ARRAY_SIZE(types)) { 2301 type = types[col->usage & 0xffff]; 2302 break; 2303 } 2304 } 2305 2306 switch (hdev->bus) { 2307 case BUS_USB: 2308 bus = "USB"; 2309 break; 2310 case BUS_BLUETOOTH: 2311 bus = "BLUETOOTH"; 2312 break; 2313 case BUS_I2C: 2314 bus = "I2C"; 2315 break; 2316 case BUS_VIRTUAL: 2317 bus = "VIRTUAL"; 2318 break; 2319 case BUS_INTEL_ISHTP: 2320 case BUS_AMD_SFH: 2321 bus = "SENSOR HUB"; 2322 break; 2323 default: 2324 bus = "<UNKNOWN>"; 2325 } 2326 2327 ret = device_create_file(&hdev->dev, &dev_attr_country); 2328 if (ret) 2329 hid_warn(hdev, 2330 "can't create sysfs country code attribute err: %d\n", ret); 2331 2332 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", 2333 buf, bus, hdev->version >> 8, hdev->version & 0xff, 2334 type, hdev->name, hdev->phys); 2335 2336 return 0; 2337 } 2338 EXPORT_SYMBOL_GPL(hid_connect); 2339 2340 void hid_disconnect(struct hid_device *hdev) 2341 { 2342 device_remove_file(&hdev->dev, &dev_attr_country); 2343 if (hdev->claimed & HID_CLAIMED_INPUT) 2344 hidinput_disconnect(hdev); 2345 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2346 hdev->hiddev_disconnect(hdev); 2347 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2348 hidraw_disconnect(hdev); 2349 hdev->claimed = 0; 2350 2351 hid_bpf_disconnect_device(hdev); 2352 } 2353 EXPORT_SYMBOL_GPL(hid_disconnect); 2354 2355 /** 2356 * hid_hw_start - start underlying HW 2357 * @hdev: hid device 2358 * @connect_mask: which outputs to connect, see HID_CONNECT_* 2359 * 2360 * Call this in probe function *after* hid_parse. This will setup HW 2361 * buffers and start the device (if not defeirred to device open). 2362 * hid_hw_stop must be called if this was successful. 2363 */ 2364 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask) 2365 { 2366 int error; 2367 2368 error = hdev->ll_driver->start(hdev); 2369 if (error) 2370 return error; 2371 2372 if (connect_mask) { 2373 error = hid_connect(hdev, connect_mask); 2374 if (error) { 2375 hdev->ll_driver->stop(hdev); 2376 return error; 2377 } 2378 } 2379 2380 return 0; 2381 } 2382 EXPORT_SYMBOL_GPL(hid_hw_start); 2383 2384 /** 2385 * hid_hw_stop - stop underlying HW 2386 * @hdev: hid device 2387 * 2388 * This is usually called from remove function or from probe when something 2389 * failed and hid_hw_start was called already. 2390 */ 2391 void hid_hw_stop(struct hid_device *hdev) 2392 { 2393 hid_disconnect(hdev); 2394 hdev->ll_driver->stop(hdev); 2395 } 2396 EXPORT_SYMBOL_GPL(hid_hw_stop); 2397 2398 /** 2399 * hid_hw_open - signal underlying HW to start delivering events 2400 * @hdev: hid device 2401 * 2402 * Tell underlying HW to start delivering events from the device. 2403 * This function should be called sometime after successful call 2404 * to hid_hw_start(). 
2405 */
2406 int hid_hw_open(struct hid_device *hdev)
2407 {
2408 int ret;
2409
2410 ret = mutex_lock_killable(&hdev->ll_open_lock);
2411 if (ret)
2412 return ret;
2413
2414 if (!hdev->ll_open_count++) {
2415 ret = hdev->ll_driver->open(hdev);
2416 if (ret)
2417 hdev->ll_open_count--;
2418 }
2419
2420 mutex_unlock(&hdev->ll_open_lock);
2421 return ret;
2422 }
2423 EXPORT_SYMBOL_GPL(hid_hw_open);
2424
2425 /**
2426 * hid_hw_close - signal underlying HW to stop delivering events
2427 *
2428 * @hdev: hid device
2429 *
2430 * This function indicates that we are not interested in the events
2431 * from this device anymore. Delivery of events may or may not stop,
2432 * depending on the number of users still outstanding.
2433 */
2434 void hid_hw_close(struct hid_device *hdev)
2435 {
2436 mutex_lock(&hdev->ll_open_lock);
2437 if (!--hdev->ll_open_count)
2438 hdev->ll_driver->close(hdev);
2439 mutex_unlock(&hdev->ll_open_lock);
2440 }
2441 EXPORT_SYMBOL_GPL(hid_hw_close);
2442
2443 /**
2444 * hid_hw_request - send report request to device
2445 *
2446 * @hdev: hid device
2447 * @report: report to send
2448 * @reqtype: hid request type
2449 */
2450 void hid_hw_request(struct hid_device *hdev,
2451 struct hid_report *report, enum hid_class_request reqtype)
2452 {
2453 if (hdev->ll_driver->request)
2454 return hdev->ll_driver->request(hdev, report, reqtype);
2455
2456 __hid_request(hdev, report, reqtype);
2457 }
2458 EXPORT_SYMBOL_GPL(hid_hw_request);
2459
2460 int __hid_hw_raw_request(struct hid_device *hdev,
2461 unsigned char reportnum, __u8 *buf,
2462 size_t len, enum hid_report_type rtype,
2463 enum hid_class_request reqtype,
2464 u64 source, bool from_bpf)
2465 {
2466 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2467 int ret;
2468
2469 if (hdev->ll_driver->max_buffer_size)
2470 max_buffer_size = hdev->ll_driver->max_buffer_size;
2471
2472 if (len < 1 || len > max_buffer_size || !buf)
2473 return -EINVAL;
2474
2475 ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2476 reqtype, source, from_bpf);
2477 if (ret)
2478 return ret;
2479
2480 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2481 rtype, reqtype);
2482 }
2483
2484 /**
2485 * hid_hw_raw_request - send report request to device
2486 *
2487 * @hdev: hid device
2488 * @reportnum: report ID
2489 * @buf: in/out data to transfer
2490 * @len: length of buf
2491 * @rtype: HID report type
2492 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2493 *
2494 * Return: count of data transferred, negative if error
2495 *
2496 * Same behavior as hid_hw_request, but with raw buffers instead.
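 *
 * For instance, reading feature report 0x02 (an illustrative report ID)
 * could look roughly like:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	ret = hid_hw_raw_request(hdev, 0x02, buf, len,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *
 * where buf and len are chosen by the caller; buf must be DMA-capable
 * (i.e. not on the stack) for USB transports.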
2497 */ 2498 int hid_hw_raw_request(struct hid_device *hdev, 2499 unsigned char reportnum, __u8 *buf, 2500 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype) 2501 { 2502 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false); 2503 } 2504 EXPORT_SYMBOL_GPL(hid_hw_raw_request); 2505 2506 int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source, 2507 bool from_bpf) 2508 { 2509 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; 2510 int ret; 2511 2512 if (hdev->ll_driver->max_buffer_size) 2513 max_buffer_size = hdev->ll_driver->max_buffer_size; 2514 2515 if (len < 1 || len > max_buffer_size || !buf) 2516 return -EINVAL; 2517 2518 ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf); 2519 if (ret) 2520 return ret; 2521 2522 if (hdev->ll_driver->output_report) 2523 return hdev->ll_driver->output_report(hdev, buf, len); 2524 2525 return -ENOSYS; 2526 } 2527 2528 /** 2529 * hid_hw_output_report - send output report to device 2530 * 2531 * @hdev: hid device 2532 * @buf: raw data to transfer 2533 * @len: length of buf 2534 * 2535 * Return: count of data transferred, negative if error 2536 */ 2537 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) 2538 { 2539 return __hid_hw_output_report(hdev, buf, len, 0, false); 2540 } 2541 EXPORT_SYMBOL_GPL(hid_hw_output_report); 2542 2543 #ifdef CONFIG_PM 2544 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) 2545 { 2546 if (hdev->driver && hdev->driver->suspend) 2547 return hdev->driver->suspend(hdev, state); 2548 2549 return 0; 2550 } 2551 EXPORT_SYMBOL_GPL(hid_driver_suspend); 2552 2553 int hid_driver_reset_resume(struct hid_device *hdev) 2554 { 2555 if (hdev->driver && hdev->driver->reset_resume) 2556 return hdev->driver->reset_resume(hdev); 2557 2558 return 0; 2559 } 2560 EXPORT_SYMBOL_GPL(hid_driver_reset_resume); 2561 2562 int hid_driver_resume(struct hid_device *hdev) 2563 { 2564 if (hdev->driver && hdev->driver->resume) 2565 return hdev->driver->resume(hdev); 2566 2567 return 0; 2568 } 2569 EXPORT_SYMBOL_GPL(hid_driver_resume); 2570 #endif /* CONFIG_PM */ 2571 2572 struct hid_dynid { 2573 struct list_head list; 2574 struct hid_device_id id; 2575 }; 2576 2577 /** 2578 * new_id_store - add a new HID device ID to this driver and re-probe devices 2579 * @drv: target device driver 2580 * @buf: buffer for scanning device ID data 2581 * @count: input size 2582 * 2583 * Adds a new dynamic hid device ID to this driver, 2584 * and causes the driver to probe for all devices again. 2585 */ 2586 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 2587 size_t count) 2588 { 2589 struct hid_driver *hdrv = to_hid_driver(drv); 2590 struct hid_dynid *dynid; 2591 __u32 bus, vendor, product; 2592 unsigned long driver_data = 0; 2593 int ret; 2594 2595 ret = sscanf(buf, "%x %x %x %lx", 2596 &bus, &vendor, &product, &driver_data); 2597 if (ret < 3) 2598 return -EINVAL; 2599 2600 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 2601 if (!dynid) 2602 return -ENOMEM; 2603 2604 dynid->id.bus = bus; 2605 dynid->id.group = HID_GROUP_ANY; 2606 dynid->id.vendor = vendor; 2607 dynid->id.product = product; 2608 dynid->id.driver_data = driver_data; 2609 2610 spin_lock(&hdrv->dyn_lock); 2611 list_add_tail(&dynid->list, &hdrv->dyn_list); 2612 spin_unlock(&hdrv->dyn_lock); 2613 2614 ret = driver_attach(&hdrv->driver); 2615 2616 return ret ? 
: count; 2617 } 2618 static DRIVER_ATTR_WO(new_id); 2619 2620 static struct attribute *hid_drv_attrs[] = { 2621 &driver_attr_new_id.attr, 2622 NULL, 2623 }; 2624 ATTRIBUTE_GROUPS(hid_drv); 2625 2626 static void hid_free_dynids(struct hid_driver *hdrv) 2627 { 2628 struct hid_dynid *dynid, *n; 2629 2630 spin_lock(&hdrv->dyn_lock); 2631 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { 2632 list_del(&dynid->list); 2633 kfree(dynid); 2634 } 2635 spin_unlock(&hdrv->dyn_lock); 2636 } 2637 2638 const struct hid_device_id *hid_match_device(struct hid_device *hdev, 2639 struct hid_driver *hdrv) 2640 { 2641 struct hid_dynid *dynid; 2642 2643 spin_lock(&hdrv->dyn_lock); 2644 list_for_each_entry(dynid, &hdrv->dyn_list, list) { 2645 if (hid_match_one_id(hdev, &dynid->id)) { 2646 spin_unlock(&hdrv->dyn_lock); 2647 return &dynid->id; 2648 } 2649 } 2650 spin_unlock(&hdrv->dyn_lock); 2651 2652 return hid_match_id(hdev, hdrv->id_table); 2653 } 2654 EXPORT_SYMBOL_GPL(hid_match_device); 2655 2656 static int hid_bus_match(struct device *dev, const struct device_driver *drv) 2657 { 2658 struct hid_driver *hdrv = to_hid_driver(drv); 2659 struct hid_device *hdev = to_hid_device(dev); 2660 2661 return hid_match_device(hdev, hdrv) != NULL; 2662 } 2663 2664 /** 2665 * hid_compare_device_paths - check if both devices share the same path 2666 * @hdev_a: hid device 2667 * @hdev_b: hid device 2668 * @separator: char to use as separator 2669 * 2670 * Check if two devices share the same path up to the last occurrence of 2671 * the separator char. Both paths must exist (i.e., zero-length paths 2672 * don't match). 2673 */ 2674 bool hid_compare_device_paths(struct hid_device *hdev_a, 2675 struct hid_device *hdev_b, char separator) 2676 { 2677 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys; 2678 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys; 2679 2680 if (n1 != n2 || n1 <= 0 || n2 <= 0) 2681 return false; 2682 2683 return !strncmp(hdev_a->phys, hdev_b->phys, n1); 2684 } 2685 EXPORT_SYMBOL_GPL(hid_compare_device_paths); 2686 2687 static bool hid_check_device_match(struct hid_device *hdev, 2688 struct hid_driver *hdrv, 2689 const struct hid_device_id **id) 2690 { 2691 *id = hid_match_device(hdev, hdrv); 2692 if (!*id) 2693 return false; 2694 2695 if (hdrv->match) 2696 return hdrv->match(hdev, hid_ignore_special_drivers); 2697 2698 /* 2699 * hid-generic implements .match(), so we must be dealing with a 2700 * different HID driver here, and can simply check if 2701 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER 2702 * are set or not. 
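 *
 * (For example, booting with "hid.ignore_special_drivers=1" on the kernel
 * command line makes every device fall back to hid-generic, regardless of
 * any vendor-specific driver being available.)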
2703 */
2704 return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2705 }
2706
2707 static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2708 {
2709 const struct hid_device_id *id;
2710 int ret;
2711
2712 if (!hdev->bpf_rsize) {
2713 unsigned int quirks;
2714
2715 /* reset the quirks that have been previously set */
2716 quirks = hid_lookup_quirk(hdev);
2717 hdev->quirks = quirks;
2718
2719 /* in case a bpf program gets detached, we need to free the old one */
2720 hid_free_bpf_rdesc(hdev);
2721
2722 /* keep this around so we know we called it once */
2723 hdev->bpf_rsize = hdev->dev_rsize;
2724
2725 /* call_hid_bpf_rdesc_fixup() will always return a valid pointer */
2726 hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
2727 &hdev->bpf_rsize);
2728 if (quirks ^ hdev->quirks)
2729 hid_info(hdev, "HID-BPF toggled quirks on the device: %04x",
2730 quirks ^ hdev->quirks);
2731 }
2732
2733 if (!hid_check_device_match(hdev, hdrv, &id))
2734 return -ENODEV;
2735
2736 hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2737 if (!hdev->devres_group_id)
2738 return -ENOMEM;
2739
2740 hdev->driver = hdrv;
2741
2742 if (hdrv->probe) {
2743 ret = hdrv->probe(hdev, id);
2744 } else { /* default probe */
2745 ret = hid_open_report(hdev);
2746 if (!ret)
2747 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2748 }
2749
2750 /*
2751 * Note that we are not closing the devres group opened above, so
2752 * even resources that were attached to the device after probe is
2753 * run are released when hid_device_remove() is executed. This is
2754 * needed as some drivers would allocate additional resources,
2755 * for example when updating firmware.
2756 */
2757
2758 if (ret) {
2759 devres_release_group(&hdev->dev, hdev->devres_group_id);
2760 hid_close_report(hdev);
2761 hdev->driver = NULL;
2762 }
2763
2764 return ret;
2765 }
2766
2767 static int hid_device_probe(struct device *dev)
2768 {
2769 struct hid_device *hdev = to_hid_device(dev);
2770 struct hid_driver *hdrv = to_hid_driver(dev->driver);
2771 int ret = 0;
2772
2773 if (down_interruptible(&hdev->driver_input_lock))
2774 return -EINTR;
2775
2776 hdev->io_started = false;
2777 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2778
2779 if (!hdev->driver)
2780 ret = __hid_device_probe(hdev, hdrv);
2781
2782 if (!hdev->io_started)
2783 up(&hdev->driver_input_lock);
2784
2785 return ret;
2786 }
2787
2788 static void hid_device_remove(struct device *dev)
2789 {
2790 struct hid_device *hdev = to_hid_device(dev);
2791 struct hid_driver *hdrv;
2792
2793 down(&hdev->driver_input_lock);
2794 hdev->io_started = false;
2795
2796 hdrv = hdev->driver;
2797 if (hdrv) {
2798 if (hdrv->remove)
2799 hdrv->remove(hdev);
2800 else /* default remove */
2801 hid_hw_stop(hdev);
2802
2803 /* Release all devres resources allocated by the driver */
2804 devres_release_group(&hdev->dev, hdev->devres_group_id);
2805
2806 hid_close_report(hdev);
2807 hdev->driver = NULL;
2808 }
2809
2810 if (!hdev->io_started)
2811 up(&hdev->driver_input_lock);
2812 }
2813
2814 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2815 char *buf)
2816 {
2817 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2818
2819 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2820 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2821 }
2822 static DEVICE_ATTR_RO(modalias);
2823
2824 static struct attribute *hid_dev_attrs[] = {
2825 &dev_attr_modalias.attr,
2826 NULL, 2827 }; 2828 static struct bin_attribute *hid_dev_bin_attrs[] = { 2829 &dev_bin_attr_report_desc, 2830 NULL 2831 }; 2832 static const struct attribute_group hid_dev_group = { 2833 .attrs = hid_dev_attrs, 2834 .bin_attrs = hid_dev_bin_attrs, 2835 }; 2836 __ATTRIBUTE_GROUPS(hid_dev); 2837 2838 static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env) 2839 { 2840 const struct hid_device *hdev = to_hid_device(dev); 2841 2842 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X", 2843 hdev->bus, hdev->vendor, hdev->product)) 2844 return -ENOMEM; 2845 2846 if (add_uevent_var(env, "HID_NAME=%s", hdev->name)) 2847 return -ENOMEM; 2848 2849 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys)) 2850 return -ENOMEM; 2851 2852 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq)) 2853 return -ENOMEM; 2854 2855 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X", 2856 hdev->bus, hdev->group, hdev->vendor, hdev->product)) 2857 return -ENOMEM; 2858 2859 return 0; 2860 } 2861 2862 const struct bus_type hid_bus_type = { 2863 .name = "hid", 2864 .dev_groups = hid_dev_groups, 2865 .drv_groups = hid_drv_groups, 2866 .match = hid_bus_match, 2867 .probe = hid_device_probe, 2868 .remove = hid_device_remove, 2869 .uevent = hid_uevent, 2870 }; 2871 EXPORT_SYMBOL(hid_bus_type); 2872 2873 int hid_add_device(struct hid_device *hdev) 2874 { 2875 static atomic_t id = ATOMIC_INIT(0); 2876 int ret; 2877 2878 if (WARN_ON(hdev->status & HID_STAT_ADDED)) 2879 return -EBUSY; 2880 2881 hdev->quirks = hid_lookup_quirk(hdev); 2882 2883 /* we need to kill them here, otherwise they will stay allocated to 2884 * wait for coming driver */ 2885 if (hid_ignore(hdev)) 2886 return -ENODEV; 2887 2888 /* 2889 * Check for the mandatory transport channel. 2890 */ 2891 if (!hdev->ll_driver->raw_request) { 2892 hid_err(hdev, "transport driver missing .raw_request()\n"); 2893 return -EINVAL; 2894 } 2895 2896 /* 2897 * Read the device report descriptor once and use as template 2898 * for the driver-specific modifications. 2899 */ 2900 ret = hdev->ll_driver->parse(hdev); 2901 if (ret) 2902 return ret; 2903 if (!hdev->dev_rdesc) 2904 return -ENODEV; 2905 2906 /* 2907 * Scan generic devices for group information 2908 */ 2909 if (hid_ignore_special_drivers) { 2910 hdev->group = HID_GROUP_GENERIC; 2911 } else if (!hdev->group && 2912 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) { 2913 ret = hid_scan_report(hdev); 2914 if (ret) 2915 hid_warn(hdev, "bad device descriptor (%d)\n", ret); 2916 } 2917 2918 hdev->id = atomic_inc_return(&id); 2919 2920 /* XXX hack, any other cleaner solution after the driver core 2921 * is converted to allow more than 20 bytes as the device name? */ 2922 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, 2923 hdev->vendor, hdev->product, hdev->id); 2924 2925 hid_debug_register(hdev, dev_name(&hdev->dev)); 2926 ret = device_add(&hdev->dev); 2927 if (!ret) 2928 hdev->status |= HID_STAT_ADDED; 2929 else 2930 hid_debug_unregister(hdev); 2931 2932 return ret; 2933 } 2934 EXPORT_SYMBOL_GPL(hid_add_device); 2935 2936 /** 2937 * hid_allocate_device - allocate new hid device descriptor 2938 * 2939 * Allocate and initialize hid device, so that hid_destroy_device might be 2940 * used to free it. 2941 * 2942 * New hid_device pointer is returned on success, otherwise ERR_PTR encoded 2943 * error value. 
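 *
 * A transport driver typically uses it roughly as follows (sketch only;
 * "my_ll_driver" is a hypothetical struct hid_ll_driver, error paths
 * abbreviated):
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	hdev->ll_driver = &my_ll_driver;
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);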
2944 */
2945 struct hid_device *hid_allocate_device(void)
2946 {
2947 struct hid_device *hdev;
2948 int ret = -ENOMEM;
2949
2950 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2951 if (hdev == NULL)
2952 return ERR_PTR(ret);
2953
2954 device_initialize(&hdev->dev);
2955 hdev->dev.release = hid_device_release;
2956 hdev->dev.bus = &hid_bus_type;
2957 device_enable_async_suspend(&hdev->dev);
2958
2959 hid_close_report(hdev);
2960
2961 init_waitqueue_head(&hdev->debug_wait);
2962 INIT_LIST_HEAD(&hdev->debug_list);
2963 spin_lock_init(&hdev->debug_list_lock);
2964 sema_init(&hdev->driver_input_lock, 1);
2965 mutex_init(&hdev->ll_open_lock);
2966 kref_init(&hdev->ref);
2967
2968 ret = hid_bpf_device_init(hdev);
2969 if (ret)
2970 goto out_err;
2971
2972 return hdev;
2973
2974 out_err:
2975 hid_destroy_device(hdev);
2976 return ERR_PTR(ret);
2977 }
2978 EXPORT_SYMBOL_GPL(hid_allocate_device);
2979
2980 static void hid_remove_device(struct hid_device *hdev)
2981 {
2982 if (hdev->status & HID_STAT_ADDED) {
2983 device_del(&hdev->dev);
2984 hid_debug_unregister(hdev);
2985 hdev->status &= ~HID_STAT_ADDED;
2986 }
2987 hid_free_bpf_rdesc(hdev);
2988 kfree(hdev->dev_rdesc);
2989 hdev->dev_rdesc = NULL;
2990 hdev->dev_rsize = 0;
2991 hdev->bpf_rsize = 0;
2992 }
2993
2994 /**
2995 * hid_destroy_device - free previously allocated device
2996 *
2997 * @hdev: hid device
2998 *
2999 * If you allocate a hid_device through hid_allocate_device(), you should only
3000 * ever free it with this function.
3001 */
3002 void hid_destroy_device(struct hid_device *hdev)
3003 {
3004 hid_bpf_destroy_device(hdev);
3005 hid_remove_device(hdev);
3006 put_device(&hdev->dev);
3007 }
3008 EXPORT_SYMBOL_GPL(hid_destroy_device);
3009
3010
3011 static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
3012 {
3013 struct hid_driver *hdrv = data;
3014 struct hid_device *hdev = to_hid_device(dev);
3015
3016 if (hdev->driver == hdrv &&
3017 !hdrv->match(hdev, hid_ignore_special_drivers) &&
3018 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
3019 return device_reprobe(dev);
3020
3021 return 0;
3022 }
3023
3024 static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3025 {
3026 struct hid_driver *hdrv = to_hid_driver(drv);
3027
3028 if (hdrv->match) {
3029 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3030 __hid_bus_reprobe_drivers);
3031 }
3032
3033 return 0;
3034 }
3035
3036 static int __bus_removed_driver(struct device_driver *drv, void *data)
3037 {
3038 return bus_rescan_devices(&hid_bus_type);
3039 }
3040
3041 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3042 const char *mod_name)
3043 {
3044 int ret;
3045
3046 hdrv->driver.name = hdrv->name;
3047 hdrv->driver.bus = &hid_bus_type;
3048 hdrv->driver.owner = owner;
3049 hdrv->driver.mod_name = mod_name;
3050
3051 INIT_LIST_HEAD(&hdrv->dyn_list);
3052 spin_lock_init(&hdrv->dyn_lock);
3053
3054 ret = driver_register(&hdrv->driver);
3055
3056 if (ret == 0)
3057 bus_for_each_drv(&hid_bus_type, NULL, NULL,
3058 __hid_bus_driver_added);
3059
3060 return ret;
3061 }
3062 EXPORT_SYMBOL_GPL(__hid_register_driver);
3063
3064 void hid_unregister_driver(struct hid_driver *hdrv)
3065 {
3066 driver_unregister(&hdrv->driver);
3067 hid_free_dynids(hdrv);
3068
3069 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3070 }
3071 EXPORT_SYMBOL_GPL(hid_unregister_driver);
3072
3073 int hid_check_keys_pressed(struct hid_device *hid)
3074 {
3075 struct hid_input *hidinput;
3076 int i;
3077
3078 if (!(hid->claimed & HID_CLAIMED_INPUT))
3079
return 0; 3080 3081 list_for_each_entry(hidinput, &hid->inputs, list) { 3082 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) 3083 if (hidinput->input->key[i]) 3084 return 1; 3085 } 3086 3087 return 0; 3088 } 3089 EXPORT_SYMBOL_GPL(hid_check_keys_pressed); 3090 3091 #ifdef CONFIG_HID_BPF 3092 static struct hid_ops __hid_ops = { 3093 .hid_get_report = hid_get_report, 3094 .hid_hw_raw_request = __hid_hw_raw_request, 3095 .hid_hw_output_report = __hid_hw_output_report, 3096 .hid_input_report = __hid_input_report, 3097 .owner = THIS_MODULE, 3098 .bus_type = &hid_bus_type, 3099 }; 3100 #endif 3101 3102 static int __init hid_init(void) 3103 { 3104 int ret; 3105 3106 ret = bus_register(&hid_bus_type); 3107 if (ret) { 3108 pr_err("can't register hid bus\n"); 3109 goto err; 3110 } 3111 3112 #ifdef CONFIG_HID_BPF 3113 hid_ops = &__hid_ops; 3114 #endif 3115 3116 ret = hidraw_init(); 3117 if (ret) 3118 goto err_bus; 3119 3120 hid_debug_init(); 3121 3122 return 0; 3123 err_bus: 3124 bus_unregister(&hid_bus_type); 3125 err: 3126 return ret; 3127 } 3128 3129 static void __exit hid_exit(void) 3130 { 3131 #ifdef CONFIG_HID_BPF 3132 hid_ops = NULL; 3133 #endif 3134 hid_debug_exit(); 3135 hidraw_exit(); 3136 bus_unregister(&hid_bus_type); 3137 hid_quirks_exit(HID_BUS_ANY); 3138 } 3139 3140 module_init(hid_init); 3141 module_exit(hid_exit); 3142 3143 MODULE_AUTHOR("Andreas Gal"); 3144 MODULE_AUTHOR("Vojtech Pavlik"); 3145 MODULE_AUTHOR("Jiri Kosina"); 3146 MODULE_DESCRIPTION("HID support for Linux"); 3147 MODULE_LICENSE("GPL"); 3148
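/*
 * Example: a minimal (hypothetical) HID driver built on top of this core.
 * This is an illustrative sketch only, not part of hid-core itself; the
 * vendor and product IDs below are placeholders.
 *
 *	static const struct hid_device_id my_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, my_devices);
 *
 *	static int my_probe(struct hid_device *hdev,
 *			    const struct hid_device_id *id)
 *	{
 *		int ret = hid_parse(hdev);
 *
 *		if (ret)
 *			return ret;
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 *
 *	static struct hid_driver my_driver = {
 *		.name = "my-hid-driver",
 *		.id_table = my_devices,
 *		.probe = my_probe,
 *	};
 *	module_hid_driver(my_driver);
 */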