1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * HID support for Linux 4 * 5 * Copyright (c) 1999 Andreas Gal 6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 8 * Copyright (c) 2006-2012 Jiri Kosina 9 */ 10 11 /* 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/kernel.h> 20 #include <linux/list.h> 21 #include <linux/mm.h> 22 #include <linux/spinlock.h> 23 #include <asm/unaligned.h> 24 #include <asm/byteorder.h> 25 #include <linux/input.h> 26 #include <linux/wait.h> 27 #include <linux/vmalloc.h> 28 #include <linux/sched.h> 29 #include <linux/semaphore.h> 30 31 #include <linux/hid.h> 32 #include <linux/hiddev.h> 33 #include <linux/hid-debug.h> 34 #include <linux/hidraw.h> 35 36 #include "hid-ids.h" 37 38 /* 39 * Version Information 40 */ 41 42 #define DRIVER_DESC "HID core driver" 43 44 int hid_debug = 0; 45 module_param_named(debug, hid_debug, int, 0600); 46 MODULE_PARM_DESC(debug, "toggle HID debugging messages"); 47 EXPORT_SYMBOL_GPL(hid_debug); 48 49 static int hid_ignore_special_drivers = 0; 50 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600); 51 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver"); 52 53 /* 54 * Register a new report for a device. 55 */ 56 57 struct hid_report *hid_register_report(struct hid_device *device, 58 unsigned int type, unsigned int id, 59 unsigned int application) 60 { 61 struct hid_report_enum *report_enum = device->report_enum + type; 62 struct hid_report *report; 63 64 if (id >= HID_MAX_IDS) 65 return NULL; 66 if (report_enum->report_id_hash[id]) 67 return report_enum->report_id_hash[id]; 68 69 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL); 70 if (!report) 71 return NULL; 72 73 if (id != 0) 74 report_enum->numbered = 1; 75 76 report->id = id; 77 report->type = type; 78 report->size = 0; 79 report->device = device; 80 report->application = application; 81 report_enum->report_id_hash[id] = report; 82 83 list_add_tail(&report->list, &report_enum->report_list); 84 85 return report; 86 } 87 EXPORT_SYMBOL_GPL(hid_register_report); 88 89 /* 90 * Register a new field for this report. 91 */ 92 93 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) 94 { 95 struct hid_field *field; 96 97 if (report->maxfield == HID_MAX_FIELDS) { 98 hid_err(report->device, "too many fields in report\n"); 99 return NULL; 100 } 101 102 field = kzalloc((sizeof(struct hid_field) + 103 usages * sizeof(struct hid_usage) + 104 values * sizeof(unsigned)), GFP_KERNEL); 105 if (!field) 106 return NULL; 107 108 field->index = report->maxfield++; 109 report->field[field->index] = field; 110 field->usage = (struct hid_usage *)(field + 1); 111 field->value = (s32 *)(field->usage + usages); 112 field->report = report; 113 114 return field; 115 } 116 117 /* 118 * Open a collection. The type/usage is pushed on the stack. 119 */ 120 121 static int open_collection(struct hid_parser *parser, unsigned type) 122 { 123 struct hid_collection *collection; 124 unsigned usage; 125 int collection_index; 126 127 usage = parser->local.usage[0]; 128 129 if (parser->collection_stack_ptr == parser->collection_stack_size) { 130 unsigned int *collection_stack; 131 unsigned int new_size = parser->collection_stack_size + 132 HID_COLLECTION_STACK_SIZE; 133 134 collection_stack = krealloc(parser->collection_stack, 135 new_size * sizeof(unsigned int), 136 GFP_KERNEL); 137 if (!collection_stack) 138 return -ENOMEM; 139 140 parser->collection_stack = collection_stack; 141 parser->collection_stack_size = new_size; 142 } 143 144 if (parser->device->maxcollection == parser->device->collection_size) { 145 collection = kmalloc( 146 array3_size(sizeof(struct hid_collection), 147 parser->device->collection_size, 148 2), 149 GFP_KERNEL); 150 if (collection == NULL) { 151 hid_err(parser->device, "failed to reallocate collection array\n"); 152 return -ENOMEM; 153 } 154 memcpy(collection, parser->device->collection, 155 sizeof(struct hid_collection) * 156 parser->device->collection_size); 157 memset(collection + parser->device->collection_size, 0, 158 sizeof(struct hid_collection) * 159 parser->device->collection_size); 160 kfree(parser->device->collection); 161 parser->device->collection = collection; 162 parser->device->collection_size *= 2; 163 } 164 165 parser->collection_stack[parser->collection_stack_ptr++] = 166 parser->device->maxcollection; 167 168 collection_index = parser->device->maxcollection++; 169 collection = parser->device->collection + collection_index; 170 collection->type = type; 171 collection->usage = usage; 172 collection->level = parser->collection_stack_ptr - 1; 173 collection->parent_idx = (collection->level == 0) ? -1 : 174 parser->collection_stack[collection->level - 1]; 175 176 if (type == HID_COLLECTION_APPLICATION) 177 parser->device->maxapplication++; 178 179 return 0; 180 } 181 182 /* 183 * Close a collection. 184 */ 185 186 static int close_collection(struct hid_parser *parser) 187 { 188 if (!parser->collection_stack_ptr) { 189 hid_err(parser->device, "collection stack underflow\n"); 190 return -EINVAL; 191 } 192 parser->collection_stack_ptr--; 193 return 0; 194 } 195 196 /* 197 * Climb up the stack, search for the specified collection type 198 * and return the usage. 199 */ 200 201 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) 202 { 203 struct hid_collection *collection = parser->device->collection; 204 int n; 205 206 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) { 207 unsigned index = parser->collection_stack[n]; 208 if (collection[index].type == type) 209 return collection[index].usage; 210 } 211 return 0; /* we know nothing about this usage type */ 212 } 213 214 /* 215 * Concatenate usage which defines 16 bits or less with the 216 * currently defined usage page to form a 32 bit usage 217 */ 218 219 static void complete_usage(struct hid_parser *parser, unsigned int index) 220 { 221 parser->local.usage[index] &= 0xFFFF; 222 parser->local.usage[index] |= 223 (parser->global.usage_page & 0xFFFF) << 16; 224 } 225 226 /* 227 * Add a usage to the temporary parser table. 228 */ 229 230 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) 231 { 232 if (parser->local.usage_index >= HID_MAX_USAGES) { 233 hid_err(parser->device, "usage index exceeded\n"); 234 return -1; 235 } 236 parser->local.usage[parser->local.usage_index] = usage; 237 238 /* 239 * If Usage item only includes usage id, concatenate it with 240 * currently defined usage page 241 */ 242 if (size <= 2) 243 complete_usage(parser, parser->local.usage_index); 244 245 parser->local.usage_size[parser->local.usage_index] = size; 246 parser->local.collection_index[parser->local.usage_index] = 247 parser->collection_stack_ptr ? 248 parser->collection_stack[parser->collection_stack_ptr - 1] : 0; 249 parser->local.usage_index++; 250 return 0; 251 } 252 253 /* 254 * Register a new field for this report. 255 */ 256 257 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags) 258 { 259 struct hid_report *report; 260 struct hid_field *field; 261 unsigned int usages; 262 unsigned int offset; 263 unsigned int i; 264 unsigned int application; 265 266 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 267 268 report = hid_register_report(parser->device, report_type, 269 parser->global.report_id, application); 270 if (!report) { 271 hid_err(parser->device, "hid_register_report failed\n"); 272 return -1; 273 } 274 275 /* Handle both signed and unsigned cases properly */ 276 if ((parser->global.logical_minimum < 0 && 277 parser->global.logical_maximum < 278 parser->global.logical_minimum) || 279 (parser->global.logical_minimum >= 0 && 280 (__u32)parser->global.logical_maximum < 281 (__u32)parser->global.logical_minimum)) { 282 dbg_hid("logical range invalid 0x%x 0x%x\n", 283 parser->global.logical_minimum, 284 parser->global.logical_maximum); 285 return -1; 286 } 287 288 offset = report->size; 289 report->size += parser->global.report_size * parser->global.report_count; 290 291 if (!parser->local.usage_index) /* Ignore padding fields */ 292 return 0; 293 294 usages = max_t(unsigned, parser->local.usage_index, 295 parser->global.report_count); 296 297 field = hid_register_field(report, usages, parser->global.report_count); 298 if (!field) 299 return 0; 300 301 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL); 302 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL); 303 field->application = application; 304 305 for (i = 0; i < usages; i++) { 306 unsigned j = i; 307 /* Duplicate the last usage we parsed if we have excess values */ 308 if (i >= parser->local.usage_index) 309 j = parser->local.usage_index - 1; 310 field->usage[i].hid = parser->local.usage[j]; 311 field->usage[i].collection_index = 312 parser->local.collection_index[j]; 313 field->usage[i].usage_index = i; 314 field->usage[i].resolution_multiplier = 1; 315 } 316 317 field->maxusage = usages; 318 field->flags = flags; 319 field->report_offset = offset; 320 field->report_type = report_type; 321 field->report_size = parser->global.report_size; 322 field->report_count = parser->global.report_count; 323 field->logical_minimum = parser->global.logical_minimum; 324 field->logical_maximum = parser->global.logical_maximum; 325 field->physical_minimum = parser->global.physical_minimum; 326 field->physical_maximum = parser->global.physical_maximum; 327 field->unit_exponent = parser->global.unit_exponent; 328 field->unit = parser->global.unit; 329 330 return 0; 331 } 332 333 /* 334 * Read data value from item. 335 */ 336 337 static u32 item_udata(struct hid_item *item) 338 { 339 switch (item->size) { 340 case 1: return item->data.u8; 341 case 2: return item->data.u16; 342 case 4: return item->data.u32; 343 } 344 return 0; 345 } 346 347 static s32 item_sdata(struct hid_item *item) 348 { 349 switch (item->size) { 350 case 1: return item->data.s8; 351 case 2: return item->data.s16; 352 case 4: return item->data.s32; 353 } 354 return 0; 355 } 356 357 /* 358 * Process a global item. 359 */ 360 361 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 362 { 363 __s32 raw_value; 364 switch (item->tag) { 365 case HID_GLOBAL_ITEM_TAG_PUSH: 366 367 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { 368 hid_err(parser->device, "global environment stack overflow\n"); 369 return -1; 370 } 371 372 memcpy(parser->global_stack + parser->global_stack_ptr++, 373 &parser->global, sizeof(struct hid_global)); 374 return 0; 375 376 case HID_GLOBAL_ITEM_TAG_POP: 377 378 if (!parser->global_stack_ptr) { 379 hid_err(parser->device, "global environment stack underflow\n"); 380 return -1; 381 } 382 383 memcpy(&parser->global, parser->global_stack + 384 --parser->global_stack_ptr, sizeof(struct hid_global)); 385 return 0; 386 387 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE: 388 parser->global.usage_page = item_udata(item); 389 return 0; 390 391 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM: 392 parser->global.logical_minimum = item_sdata(item); 393 return 0; 394 395 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM: 396 if (parser->global.logical_minimum < 0) 397 parser->global.logical_maximum = item_sdata(item); 398 else 399 parser->global.logical_maximum = item_udata(item); 400 return 0; 401 402 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM: 403 parser->global.physical_minimum = item_sdata(item); 404 return 0; 405 406 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM: 407 if (parser->global.physical_minimum < 0) 408 parser->global.physical_maximum = item_sdata(item); 409 else 410 parser->global.physical_maximum = item_udata(item); 411 return 0; 412 413 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 414 /* Many devices provide unit exponent as a two's complement 415 * nibble due to the common misunderstanding of HID 416 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle 417 * both this and the standard encoding. */ 418 raw_value = item_sdata(item); 419 if (!(raw_value & 0xfffffff0)) 420 parser->global.unit_exponent = hid_snto32(raw_value, 4); 421 else 422 parser->global.unit_exponent = raw_value; 423 return 0; 424 425 case HID_GLOBAL_ITEM_TAG_UNIT: 426 parser->global.unit = item_udata(item); 427 return 0; 428 429 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: 430 parser->global.report_size = item_udata(item); 431 if (parser->global.report_size > 256) { 432 hid_err(parser->device, "invalid report_size %d\n", 433 parser->global.report_size); 434 return -1; 435 } 436 return 0; 437 438 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT: 439 parser->global.report_count = item_udata(item); 440 if (parser->global.report_count > HID_MAX_USAGES) { 441 hid_err(parser->device, "invalid report_count %d\n", 442 parser->global.report_count); 443 return -1; 444 } 445 return 0; 446 447 case HID_GLOBAL_ITEM_TAG_REPORT_ID: 448 parser->global.report_id = item_udata(item); 449 if (parser->global.report_id == 0 || 450 parser->global.report_id >= HID_MAX_IDS) { 451 hid_err(parser->device, "report_id %u is invalid\n", 452 parser->global.report_id); 453 return -1; 454 } 455 return 0; 456 457 default: 458 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag); 459 return -1; 460 } 461 } 462 463 /* 464 * Process a local item. 465 */ 466 467 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) 468 { 469 __u32 data; 470 unsigned n; 471 __u32 count; 472 473 data = item_udata(item); 474 475 switch (item->tag) { 476 case HID_LOCAL_ITEM_TAG_DELIMITER: 477 478 if (data) { 479 /* 480 * We treat items before the first delimiter 481 * as global to all usage sets (branch 0). 482 * In the moment we process only these global 483 * items and the first delimiter set. 484 */ 485 if (parser->local.delimiter_depth != 0) { 486 hid_err(parser->device, "nested delimiters\n"); 487 return -1; 488 } 489 parser->local.delimiter_depth++; 490 parser->local.delimiter_branch++; 491 } else { 492 if (parser->local.delimiter_depth < 1) { 493 hid_err(parser->device, "bogus close delimiter\n"); 494 return -1; 495 } 496 parser->local.delimiter_depth--; 497 } 498 return 0; 499 500 case HID_LOCAL_ITEM_TAG_USAGE: 501 502 if (parser->local.delimiter_branch > 1) { 503 dbg_hid("alternative usage ignored\n"); 504 return 0; 505 } 506 507 return hid_add_usage(parser, data, item->size); 508 509 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: 510 511 if (parser->local.delimiter_branch > 1) { 512 dbg_hid("alternative usage ignored\n"); 513 return 0; 514 } 515 516 parser->local.usage_minimum = data; 517 return 0; 518 519 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM: 520 521 if (parser->local.delimiter_branch > 1) { 522 dbg_hid("alternative usage ignored\n"); 523 return 0; 524 } 525 526 count = data - parser->local.usage_minimum; 527 if (count + parser->local.usage_index >= HID_MAX_USAGES) { 528 /* 529 * We do not warn if the name is not set, we are 530 * actually pre-scanning the device. 531 */ 532 if (dev_name(&parser->device->dev)) 533 hid_warn(parser->device, 534 "ignoring exceeding usage max\n"); 535 data = HID_MAX_USAGES - parser->local.usage_index + 536 parser->local.usage_minimum - 1; 537 if (data <= 0) { 538 hid_err(parser->device, 539 "no more usage index available\n"); 540 return -1; 541 } 542 } 543 544 for (n = parser->local.usage_minimum; n <= data; n++) 545 if (hid_add_usage(parser, n, item->size)) { 546 dbg_hid("hid_add_usage failed\n"); 547 return -1; 548 } 549 return 0; 550 551 default: 552 553 dbg_hid("unknown local item tag 0x%x\n", item->tag); 554 return 0; 555 } 556 return 0; 557 } 558 559 /* 560 * Concatenate Usage Pages into Usages where relevant: 561 * As per specification, 6.2.2.8: "When the parser encounters a main item it 562 * concatenates the last declared Usage Page with a Usage to form a complete 563 * usage value." 564 */ 565 566 static void hid_concatenate_last_usage_page(struct hid_parser *parser) 567 { 568 int i; 569 unsigned int usage_page; 570 unsigned int current_page; 571 572 if (!parser->local.usage_index) 573 return; 574 575 usage_page = parser->global.usage_page; 576 577 /* 578 * Concatenate usage page again only if last declared Usage Page 579 * has not been already used in previous usages concatenation 580 */ 581 for (i = parser->local.usage_index - 1; i >= 0; i--) { 582 if (parser->local.usage_size[i] > 2) 583 /* Ignore extended usages */ 584 continue; 585 586 current_page = parser->local.usage[i] >> 16; 587 if (current_page == usage_page) 588 break; 589 590 complete_usage(parser, i); 591 } 592 } 593 594 /* 595 * Process a main item. 596 */ 597 598 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) 599 { 600 __u32 data; 601 int ret; 602 603 hid_concatenate_last_usage_page(parser); 604 605 data = item_udata(item); 606 607 switch (item->tag) { 608 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 609 ret = open_collection(parser, data & 0xff); 610 break; 611 case HID_MAIN_ITEM_TAG_END_COLLECTION: 612 ret = close_collection(parser); 613 break; 614 case HID_MAIN_ITEM_TAG_INPUT: 615 ret = hid_add_field(parser, HID_INPUT_REPORT, data); 616 break; 617 case HID_MAIN_ITEM_TAG_OUTPUT: 618 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data); 619 break; 620 case HID_MAIN_ITEM_TAG_FEATURE: 621 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 622 break; 623 default: 624 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag); 625 ret = 0; 626 } 627 628 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */ 629 630 return ret; 631 } 632 633 /* 634 * Process a reserved item. 635 */ 636 637 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item) 638 { 639 dbg_hid("reserved item type, tag 0x%x\n", item->tag); 640 return 0; 641 } 642 643 /* 644 * Free a report and all registered fields. The field->usage and 645 * field->value table's are allocated behind the field, so we need 646 * only to free(field) itself. 647 */ 648 649 static void hid_free_report(struct hid_report *report) 650 { 651 unsigned n; 652 653 for (n = 0; n < report->maxfield; n++) 654 kfree(report->field[n]); 655 kfree(report); 656 } 657 658 /* 659 * Close report. This function returns the device 660 * state to the point prior to hid_open_report(). 661 */ 662 static void hid_close_report(struct hid_device *device) 663 { 664 unsigned i, j; 665 666 for (i = 0; i < HID_REPORT_TYPES; i++) { 667 struct hid_report_enum *report_enum = device->report_enum + i; 668 669 for (j = 0; j < HID_MAX_IDS; j++) { 670 struct hid_report *report = report_enum->report_id_hash[j]; 671 if (report) 672 hid_free_report(report); 673 } 674 memset(report_enum, 0, sizeof(*report_enum)); 675 INIT_LIST_HEAD(&report_enum->report_list); 676 } 677 678 kfree(device->rdesc); 679 device->rdesc = NULL; 680 device->rsize = 0; 681 682 kfree(device->collection); 683 device->collection = NULL; 684 device->collection_size = 0; 685 device->maxcollection = 0; 686 device->maxapplication = 0; 687 688 device->status &= ~HID_STAT_PARSED; 689 } 690 691 /* 692 * Free a device structure, all reports, and all fields. 693 */ 694 695 static void hid_device_release(struct device *dev) 696 { 697 struct hid_device *hid = to_hid_device(dev); 698 699 hid_close_report(hid); 700 kfree(hid->dev_rdesc); 701 kfree(hid); 702 } 703 704 /* 705 * Fetch a report description item from the data stream. We support long 706 * items, though they are not used yet. 707 */ 708 709 static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) 710 { 711 u8 b; 712 713 if ((end - start) <= 0) 714 return NULL; 715 716 b = *start++; 717 718 item->type = (b >> 2) & 3; 719 item->tag = (b >> 4) & 15; 720 721 if (item->tag == HID_ITEM_TAG_LONG) { 722 723 item->format = HID_ITEM_FORMAT_LONG; 724 725 if ((end - start) < 2) 726 return NULL; 727 728 item->size = *start++; 729 item->tag = *start++; 730 731 if ((end - start) < item->size) 732 return NULL; 733 734 item->data.longdata = start; 735 start += item->size; 736 return start; 737 } 738 739 item->format = HID_ITEM_FORMAT_SHORT; 740 item->size = b & 3; 741 742 switch (item->size) { 743 case 0: 744 return start; 745 746 case 1: 747 if ((end - start) < 1) 748 return NULL; 749 item->data.u8 = *start++; 750 return start; 751 752 case 2: 753 if ((end - start) < 2) 754 return NULL; 755 item->data.u16 = get_unaligned_le16(start); 756 start = (__u8 *)((__le16 *)start + 1); 757 return start; 758 759 case 3: 760 item->size++; 761 if ((end - start) < 4) 762 return NULL; 763 item->data.u32 = get_unaligned_le32(start); 764 start = (__u8 *)((__le32 *)start + 1); 765 return start; 766 } 767 768 return NULL; 769 } 770 771 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage) 772 { 773 struct hid_device *hid = parser->device; 774 775 if (usage == HID_DG_CONTACTID) 776 hid->group = HID_GROUP_MULTITOUCH; 777 } 778 779 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) 780 { 781 if (usage == 0xff0000c5 && parser->global.report_count == 256 && 782 parser->global.report_size == 8) 783 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 784 785 if (usage == 0xff0000c6 && parser->global.report_count == 1 && 786 parser->global.report_size == 8) 787 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 788 } 789 790 static void hid_scan_collection(struct hid_parser *parser, unsigned type) 791 { 792 struct hid_device *hid = parser->device; 793 int i; 794 795 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && 796 type == HID_COLLECTION_PHYSICAL) 797 hid->group = HID_GROUP_SENSOR_HUB; 798 799 if (hid->vendor == USB_VENDOR_ID_MICROSOFT && 800 hid->product == USB_DEVICE_ID_MS_POWER_COVER && 801 hid->group == HID_GROUP_MULTITOUCH) 802 hid->group = HID_GROUP_GENERIC; 803 804 if ((parser->global.usage_page << 16) == HID_UP_GENDESK) 805 for (i = 0; i < parser->local.usage_index; i++) 806 if (parser->local.usage[i] == HID_GD_POINTER) 807 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER; 808 809 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR) 810 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC; 811 } 812 813 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) 814 { 815 __u32 data; 816 int i; 817 818 hid_concatenate_last_usage_page(parser); 819 820 data = item_udata(item); 821 822 switch (item->tag) { 823 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 824 hid_scan_collection(parser, data & 0xff); 825 break; 826 case HID_MAIN_ITEM_TAG_END_COLLECTION: 827 break; 828 case HID_MAIN_ITEM_TAG_INPUT: 829 /* ignore constant inputs, they will be ignored by hid-input */ 830 if (data & HID_MAIN_ITEM_CONSTANT) 831 break; 832 for (i = 0; i < parser->local.usage_index; i++) 833 hid_scan_input_usage(parser, parser->local.usage[i]); 834 break; 835 case HID_MAIN_ITEM_TAG_OUTPUT: 836 break; 837 case HID_MAIN_ITEM_TAG_FEATURE: 838 for (i = 0; i < parser->local.usage_index; i++) 839 hid_scan_feature_usage(parser, parser->local.usage[i]); 840 break; 841 } 842 843 /* Reset the local parser environment */ 844 memset(&parser->local, 0, sizeof(parser->local)); 845 846 return 0; 847 } 848 849 /* 850 * Scan a report descriptor before the device is added to the bus. 851 * Sets device groups and other properties that determine what driver 852 * to load. 853 */ 854 static int hid_scan_report(struct hid_device *hid) 855 { 856 struct hid_parser *parser; 857 struct hid_item item; 858 __u8 *start = hid->dev_rdesc; 859 __u8 *end = start + hid->dev_rsize; 860 static int (*dispatch_type[])(struct hid_parser *parser, 861 struct hid_item *item) = { 862 hid_scan_main, 863 hid_parser_global, 864 hid_parser_local, 865 hid_parser_reserved 866 }; 867 868 parser = vzalloc(sizeof(struct hid_parser)); 869 if (!parser) 870 return -ENOMEM; 871 872 parser->device = hid; 873 hid->group = HID_GROUP_GENERIC; 874 875 /* 876 * The parsing is simpler than the one in hid_open_report() as we should 877 * be robust against hid errors. Those errors will be raised by 878 * hid_open_report() anyway. 879 */ 880 while ((start = fetch_item(start, end, &item)) != NULL) 881 dispatch_type[item.type](parser, &item); 882 883 /* 884 * Handle special flags set during scanning. 885 */ 886 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) && 887 (hid->group == HID_GROUP_MULTITOUCH)) 888 hid->group = HID_GROUP_MULTITOUCH_WIN_8; 889 890 /* 891 * Vendor specific handlings 892 */ 893 switch (hid->vendor) { 894 case USB_VENDOR_ID_WACOM: 895 hid->group = HID_GROUP_WACOM; 896 break; 897 case USB_VENDOR_ID_SYNAPTICS: 898 if (hid->group == HID_GROUP_GENERIC) 899 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 900 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 901 /* 902 * hid-rmi should take care of them, 903 * not hid-generic 904 */ 905 hid->group = HID_GROUP_RMI; 906 break; 907 } 908 909 kfree(parser->collection_stack); 910 vfree(parser); 911 return 0; 912 } 913 914 /** 915 * hid_parse_report - parse device report 916 * 917 * @device: hid device 918 * @start: report start 919 * @size: report size 920 * 921 * Allocate the device report as read by the bus driver. This function should 922 * only be called from parse() in ll drivers. 923 */ 924 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size) 925 { 926 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); 927 if (!hid->dev_rdesc) 928 return -ENOMEM; 929 hid->dev_rsize = size; 930 return 0; 931 } 932 EXPORT_SYMBOL_GPL(hid_parse_report); 933 934 static const char * const hid_report_names[] = { 935 "HID_INPUT_REPORT", 936 "HID_OUTPUT_REPORT", 937 "HID_FEATURE_REPORT", 938 }; 939 /** 940 * hid_validate_values - validate existing device report's value indexes 941 * 942 * @device: hid device 943 * @type: which report type to examine 944 * @id: which report ID to examine (0 for first) 945 * @field_index: which report field to examine 946 * @report_counts: expected number of values 947 * 948 * Validate the number of values in a given field of a given report, after 949 * parsing. 950 */ 951 struct hid_report *hid_validate_values(struct hid_device *hid, 952 unsigned int type, unsigned int id, 953 unsigned int field_index, 954 unsigned int report_counts) 955 { 956 struct hid_report *report; 957 958 if (type > HID_FEATURE_REPORT) { 959 hid_err(hid, "invalid HID report type %u\n", type); 960 return NULL; 961 } 962 963 if (id >= HID_MAX_IDS) { 964 hid_err(hid, "invalid HID report id %u\n", id); 965 return NULL; 966 } 967 968 /* 969 * Explicitly not using hid_get_report() here since it depends on 970 * ->numbered being checked, which may not always be the case when 971 * drivers go to access report values. 972 */ 973 if (id == 0) { 974 /* 975 * Validating on id 0 means we should examine the first 976 * report in the list. 977 */ 978 report = list_entry( 979 hid->report_enum[type].report_list.next, 980 struct hid_report, list); 981 } else { 982 report = hid->report_enum[type].report_id_hash[id]; 983 } 984 if (!report) { 985 hid_err(hid, "missing %s %u\n", hid_report_names[type], id); 986 return NULL; 987 } 988 if (report->maxfield <= field_index) { 989 hid_err(hid, "not enough fields in %s %u\n", 990 hid_report_names[type], id); 991 return NULL; 992 } 993 if (report->field[field_index]->report_count < report_counts) { 994 hid_err(hid, "not enough values in %s %u field %u\n", 995 hid_report_names[type], id, field_index); 996 return NULL; 997 } 998 return report; 999 } 1000 EXPORT_SYMBOL_GPL(hid_validate_values); 1001 1002 static int hid_calculate_multiplier(struct hid_device *hid, 1003 struct hid_field *multiplier) 1004 { 1005 int m; 1006 __s32 v = *multiplier->value; 1007 __s32 lmin = multiplier->logical_minimum; 1008 __s32 lmax = multiplier->logical_maximum; 1009 __s32 pmin = multiplier->physical_minimum; 1010 __s32 pmax = multiplier->physical_maximum; 1011 1012 /* 1013 * "Because OS implementations will generally divide the control's 1014 * reported count by the Effective Resolution Multiplier, designers 1015 * should take care not to establish a potential Effective 1016 * Resolution Multiplier of zero." 1017 * HID Usage Table, v1.12, Section 4.3.1, p31 1018 */ 1019 if (lmax - lmin == 0) 1020 return 1; 1021 /* 1022 * Handling the unit exponent is left as an exercise to whoever 1023 * finds a device where that exponent is not 0. 1024 */ 1025 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin); 1026 if (unlikely(multiplier->unit_exponent != 0)) { 1027 hid_warn(hid, 1028 "unsupported Resolution Multiplier unit exponent %d\n", 1029 multiplier->unit_exponent); 1030 } 1031 1032 /* There are no devices with an effective multiplier > 255 */ 1033 if (unlikely(m == 0 || m > 255 || m < -255)) { 1034 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m); 1035 m = 1; 1036 } 1037 1038 return m; 1039 } 1040 1041 static void hid_apply_multiplier_to_field(struct hid_device *hid, 1042 struct hid_field *field, 1043 struct hid_collection *multiplier_collection, 1044 int effective_multiplier) 1045 { 1046 struct hid_collection *collection; 1047 struct hid_usage *usage; 1048 int i; 1049 1050 /* 1051 * If multiplier_collection is NULL, the multiplier applies 1052 * to all fields in the report. 1053 * Otherwise, it is the Logical Collection the multiplier applies to 1054 * but our field may be in a subcollection of that collection. 1055 */ 1056 for (i = 0; i < field->maxusage; i++) { 1057 usage = &field->usage[i]; 1058 1059 collection = &hid->collection[usage->collection_index]; 1060 while (collection->parent_idx != -1 && 1061 collection != multiplier_collection) 1062 collection = &hid->collection[collection->parent_idx]; 1063 1064 if (collection->parent_idx != -1 || 1065 multiplier_collection == NULL) 1066 usage->resolution_multiplier = effective_multiplier; 1067 1068 } 1069 } 1070 1071 static void hid_apply_multiplier(struct hid_device *hid, 1072 struct hid_field *multiplier) 1073 { 1074 struct hid_report_enum *rep_enum; 1075 struct hid_report *rep; 1076 struct hid_field *field; 1077 struct hid_collection *multiplier_collection; 1078 int effective_multiplier; 1079 int i; 1080 1081 /* 1082 * "The Resolution Multiplier control must be contained in the same 1083 * Logical Collection as the control(s) to which it is to be applied. 1084 * If no Resolution Multiplier is defined, then the Resolution 1085 * Multiplier defaults to 1. If more than one control exists in a 1086 * Logical Collection, the Resolution Multiplier is associated with 1087 * all controls in the collection. If no Logical Collection is 1088 * defined, the Resolution Multiplier is associated with all 1089 * controls in the report." 1090 * HID Usage Table, v1.12, Section 4.3.1, p30 1091 * 1092 * Thus, search from the current collection upwards until we find a 1093 * logical collection. Then search all fields for that same parent 1094 * collection. Those are the fields the multiplier applies to. 1095 * 1096 * If we have more than one multiplier, it will overwrite the 1097 * applicable fields later. 1098 */ 1099 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1100 while (multiplier_collection->parent_idx != -1 && 1101 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1102 multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; 1103 1104 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1105 1106 rep_enum = &hid->report_enum[HID_INPUT_REPORT]; 1107 list_for_each_entry(rep, &rep_enum->report_list, list) { 1108 for (i = 0; i < rep->maxfield; i++) { 1109 field = rep->field[i]; 1110 hid_apply_multiplier_to_field(hid, field, 1111 multiplier_collection, 1112 effective_multiplier); 1113 } 1114 } 1115 } 1116 1117 /* 1118 * hid_setup_resolution_multiplier - set up all resolution multipliers 1119 * 1120 * @device: hid device 1121 * 1122 * Search for all Resolution Multiplier Feature Reports and apply their 1123 * value to all matching Input items. This only updates the internal struct 1124 * fields. 1125 * 1126 * The Resolution Multiplier is applied by the hardware. If the multiplier 1127 * is anything other than 1, the hardware will send pre-multiplied events 1128 * so that the same physical interaction generates an accumulated 1129 * accumulated_value = value * * multiplier 1130 * This may be achieved by sending 1131 * - "value * multiplier" for each event, or 1132 * - "value" but "multiplier" times as frequently, or 1133 * - a combination of the above 1134 * The only guarantee is that the same physical interaction always generates 1135 * an accumulated 'value * multiplier'. 1136 * 1137 * This function must be called before any event processing and after 1138 * any SetRequest to the Resolution Multiplier. 1139 */ 1140 void hid_setup_resolution_multiplier(struct hid_device *hid) 1141 { 1142 struct hid_report_enum *rep_enum; 1143 struct hid_report *rep; 1144 struct hid_usage *usage; 1145 int i, j; 1146 1147 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1148 list_for_each_entry(rep, &rep_enum->report_list, list) { 1149 for (i = 0; i < rep->maxfield; i++) { 1150 /* Ignore if report count is out of bounds. */ 1151 if (rep->field[i]->report_count < 1) 1152 continue; 1153 1154 for (j = 0; j < rep->field[i]->maxusage; j++) { 1155 usage = &rep->field[i]->usage[j]; 1156 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) 1157 hid_apply_multiplier(hid, 1158 rep->field[i]); 1159 } 1160 } 1161 } 1162 } 1163 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier); 1164 1165 /** 1166 * hid_open_report - open a driver-specific device report 1167 * 1168 * @device: hid device 1169 * 1170 * Parse a report description into a hid_device structure. Reports are 1171 * enumerated, fields are attached to these reports. 1172 * 0 returned on success, otherwise nonzero error value. 1173 * 1174 * This function (or the equivalent hid_parse() macro) should only be 1175 * called from probe() in drivers, before starting the device. 1176 */ 1177 int hid_open_report(struct hid_device *device) 1178 { 1179 struct hid_parser *parser; 1180 struct hid_item item; 1181 unsigned int size; 1182 __u8 *start; 1183 __u8 *buf; 1184 __u8 *end; 1185 __u8 *next; 1186 int ret; 1187 static int (*dispatch_type[])(struct hid_parser *parser, 1188 struct hid_item *item) = { 1189 hid_parser_main, 1190 hid_parser_global, 1191 hid_parser_local, 1192 hid_parser_reserved 1193 }; 1194 1195 if (WARN_ON(device->status & HID_STAT_PARSED)) 1196 return -EBUSY; 1197 1198 start = device->dev_rdesc; 1199 if (WARN_ON(!start)) 1200 return -ENODEV; 1201 size = device->dev_rsize; 1202 1203 buf = kmemdup(start, size, GFP_KERNEL); 1204 if (buf == NULL) 1205 return -ENOMEM; 1206 1207 if (device->driver->report_fixup) 1208 start = device->driver->report_fixup(device, buf, &size); 1209 else 1210 start = buf; 1211 1212 start = kmemdup(start, size, GFP_KERNEL); 1213 kfree(buf); 1214 if (start == NULL) 1215 return -ENOMEM; 1216 1217 device->rdesc = start; 1218 device->rsize = size; 1219 1220 parser = vzalloc(sizeof(struct hid_parser)); 1221 if (!parser) { 1222 ret = -ENOMEM; 1223 goto alloc_err; 1224 } 1225 1226 parser->device = device; 1227 1228 end = start + size; 1229 1230 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS, 1231 sizeof(struct hid_collection), GFP_KERNEL); 1232 if (!device->collection) { 1233 ret = -ENOMEM; 1234 goto err; 1235 } 1236 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; 1237 1238 ret = -EINVAL; 1239 while ((next = fetch_item(start, end, &item)) != NULL) { 1240 start = next; 1241 1242 if (item.format != HID_ITEM_FORMAT_SHORT) { 1243 hid_err(device, "unexpected long global item\n"); 1244 goto err; 1245 } 1246 1247 if (dispatch_type[item.type](parser, &item)) { 1248 hid_err(device, "item %u %u %u %u parsing failed\n", 1249 item.format, (unsigned)item.size, 1250 (unsigned)item.type, (unsigned)item.tag); 1251 goto err; 1252 } 1253 1254 if (start == end) { 1255 if (parser->collection_stack_ptr) { 1256 hid_err(device, "unbalanced collection at end of report description\n"); 1257 goto err; 1258 } 1259 if (parser->local.delimiter_depth) { 1260 hid_err(device, "unbalanced delimiter at end of report description\n"); 1261 goto err; 1262 } 1263 1264 /* 1265 * fetch initial values in case the device's 1266 * default multiplier isn't the recommended 1 1267 */ 1268 hid_setup_resolution_multiplier(device); 1269 1270 kfree(parser->collection_stack); 1271 vfree(parser); 1272 device->status |= HID_STAT_PARSED; 1273 1274 return 0; 1275 } 1276 } 1277 1278 hid_err(device, "item fetching failed at offset %u/%u\n", 1279 size - (unsigned int)(end - start), size); 1280 err: 1281 kfree(parser->collection_stack); 1282 alloc_err: 1283 vfree(parser); 1284 hid_close_report(device); 1285 return ret; 1286 } 1287 EXPORT_SYMBOL_GPL(hid_open_report); 1288 1289 /* 1290 * Convert a signed n-bit integer to signed 32-bit integer. Common 1291 * cases are done through the compiler, the screwed things has to be 1292 * done by hand. 1293 */ 1294 1295 static s32 snto32(__u32 value, unsigned n) 1296 { 1297 switch (n) { 1298 case 8: return ((__s8)value); 1299 case 16: return ((__s16)value); 1300 case 32: return ((__s32)value); 1301 } 1302 return value & (1 << (n - 1)) ? value | (~0U << n) : value; 1303 } 1304 1305 s32 hid_snto32(__u32 value, unsigned n) 1306 { 1307 return snto32(value, n); 1308 } 1309 EXPORT_SYMBOL_GPL(hid_snto32); 1310 1311 /* 1312 * Convert a signed 32-bit integer to a signed n-bit integer. 1313 */ 1314 1315 static u32 s32ton(__s32 value, unsigned n) 1316 { 1317 s32 a = value >> (n - 1); 1318 if (a && a != -1) 1319 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1; 1320 return value & ((1 << n) - 1); 1321 } 1322 1323 /* 1324 * Extract/implement a data field from/to a little endian report (bit array). 1325 * 1326 * Code sort-of follows HID spec: 1327 * http://www.usb.org/developers/hidpage/HID1_11.pdf 1328 * 1329 * While the USB HID spec allows unlimited length bit fields in "report 1330 * descriptors", most devices never use more than 16 bits. 1331 * One model of UPS is claimed to report "LINEV" as a 32-bit field. 1332 * Search linux-kernel and linux-usb-devel archives for "hid-core extract". 1333 */ 1334 1335 static u32 __extract(u8 *report, unsigned offset, int n) 1336 { 1337 unsigned int idx = offset / 8; 1338 unsigned int bit_nr = 0; 1339 unsigned int bit_shift = offset % 8; 1340 int bits_to_copy = 8 - bit_shift; 1341 u32 value = 0; 1342 u32 mask = n < 32 ? (1U << n) - 1 : ~0U; 1343 1344 while (n > 0) { 1345 value |= ((u32)report[idx] >> bit_shift) << bit_nr; 1346 n -= bits_to_copy; 1347 bit_nr += bits_to_copy; 1348 bits_to_copy = 8; 1349 bit_shift = 0; 1350 idx++; 1351 } 1352 1353 return value & mask; 1354 } 1355 1356 u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1357 unsigned offset, unsigned n) 1358 { 1359 if (n > 32) { 1360 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n", 1361 __func__, n, current->comm); 1362 n = 32; 1363 } 1364 1365 return __extract(report, offset, n); 1366 } 1367 EXPORT_SYMBOL_GPL(hid_field_extract); 1368 1369 /* 1370 * "implement" : set bits in a little endian bit stream. 1371 * Same concepts as "extract" (see comments above). 1372 * The data mangled in the bit stream remains in little endian 1373 * order the whole time. It make more sense to talk about 1374 * endianness of register values by considering a register 1375 * a "cached" copy of the little endian bit stream. 1376 */ 1377 1378 static void __implement(u8 *report, unsigned offset, int n, u32 value) 1379 { 1380 unsigned int idx = offset / 8; 1381 unsigned int bit_shift = offset % 8; 1382 int bits_to_set = 8 - bit_shift; 1383 1384 while (n - bits_to_set >= 0) { 1385 report[idx] &= ~(0xff << bit_shift); 1386 report[idx] |= value << bit_shift; 1387 value >>= bits_to_set; 1388 n -= bits_to_set; 1389 bits_to_set = 8; 1390 bit_shift = 0; 1391 idx++; 1392 } 1393 1394 /* last nibble */ 1395 if (n) { 1396 u8 bit_mask = ((1U << n) - 1); 1397 report[idx] &= ~(bit_mask << bit_shift); 1398 report[idx] |= value << bit_shift; 1399 } 1400 } 1401 1402 static void implement(const struct hid_device *hid, u8 *report, 1403 unsigned offset, unsigned n, u32 value) 1404 { 1405 if (unlikely(n > 32)) { 1406 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n", 1407 __func__, n, current->comm); 1408 n = 32; 1409 } else if (n < 32) { 1410 u32 m = (1U << n) - 1; 1411 1412 if (unlikely(value > m)) { 1413 hid_warn(hid, 1414 "%s() called with too large value %d (n: %d)! (%s)\n", 1415 __func__, value, n, current->comm); 1416 WARN_ON(1); 1417 value &= m; 1418 } 1419 } 1420 1421 __implement(report, offset, n, value); 1422 } 1423 1424 /* 1425 * Search an array for a value. 1426 */ 1427 1428 static int search(__s32 *array, __s32 value, unsigned n) 1429 { 1430 while (n--) { 1431 if (*array++ == value) 1432 return 0; 1433 } 1434 return -1; 1435 } 1436 1437 /** 1438 * hid_match_report - check if driver's raw_event should be called 1439 * 1440 * @hid: hid device 1441 * @report_type: type to match against 1442 * 1443 * compare hid->driver->report_table->report_type to report->type 1444 */ 1445 static int hid_match_report(struct hid_device *hid, struct hid_report *report) 1446 { 1447 const struct hid_report_id *id = hid->driver->report_table; 1448 1449 if (!id) /* NULL means all */ 1450 return 1; 1451 1452 for (; id->report_type != HID_TERMINATOR; id++) 1453 if (id->report_type == HID_ANY_ID || 1454 id->report_type == report->type) 1455 return 1; 1456 return 0; 1457 } 1458 1459 /** 1460 * hid_match_usage - check if driver's event should be called 1461 * 1462 * @hid: hid device 1463 * @usage: usage to match against 1464 * 1465 * compare hid->driver->usage_table->usage_{type,code} to 1466 * usage->usage_{type,code} 1467 */ 1468 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage) 1469 { 1470 const struct hid_usage_id *id = hid->driver->usage_table; 1471 1472 if (!id) /* NULL means all */ 1473 return 1; 1474 1475 for (; id->usage_type != HID_ANY_ID - 1; id++) 1476 if ((id->usage_hid == HID_ANY_ID || 1477 id->usage_hid == usage->hid) && 1478 (id->usage_type == HID_ANY_ID || 1479 id->usage_type == usage->type) && 1480 (id->usage_code == HID_ANY_ID || 1481 id->usage_code == usage->code)) 1482 return 1; 1483 return 0; 1484 } 1485 1486 static void hid_process_event(struct hid_device *hid, struct hid_field *field, 1487 struct hid_usage *usage, __s32 value, int interrupt) 1488 { 1489 struct hid_driver *hdrv = hid->driver; 1490 int ret; 1491 1492 if (!list_empty(&hid->debug_list)) 1493 hid_dump_input(hid, usage, value); 1494 1495 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1496 ret = hdrv->event(hid, field, usage, value); 1497 if (ret != 0) { 1498 if (ret < 0) 1499 hid_err(hid, "%s's event failed with %d\n", 1500 hdrv->name, ret); 1501 return; 1502 } 1503 } 1504 1505 if (hid->claimed & HID_CLAIMED_INPUT) 1506 hidinput_hid_event(hid, field, usage, value); 1507 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event) 1508 hid->hiddev_hid_event(hid, field, usage, value); 1509 } 1510 1511 /* 1512 * Analyse a received field, and fetch the data from it. The field 1513 * content is stored for next report processing (we do differential 1514 * reporting to the layer). 1515 */ 1516 1517 static void hid_input_field(struct hid_device *hid, struct hid_field *field, 1518 __u8 *data, int interrupt) 1519 { 1520 unsigned n; 1521 unsigned count = field->report_count; 1522 unsigned offset = field->report_offset; 1523 unsigned size = field->report_size; 1524 __s32 min = field->logical_minimum; 1525 __s32 max = field->logical_maximum; 1526 __s32 *value; 1527 1528 value = kmalloc_array(count, sizeof(__s32), GFP_ATOMIC); 1529 if (!value) 1530 return; 1531 1532 for (n = 0; n < count; n++) { 1533 1534 value[n] = min < 0 ? 1535 snto32(hid_field_extract(hid, data, offset + n * size, 1536 size), size) : 1537 hid_field_extract(hid, data, offset + n * size, size); 1538 1539 /* Ignore report if ErrorRollOver */ 1540 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && 1541 value[n] >= min && value[n] <= max && 1542 value[n] - min < field->maxusage && 1543 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) 1544 goto exit; 1545 } 1546 1547 for (n = 0; n < count; n++) { 1548 1549 if (HID_MAIN_ITEM_VARIABLE & field->flags) { 1550 hid_process_event(hid, field, &field->usage[n], value[n], interrupt); 1551 continue; 1552 } 1553 1554 if (field->value[n] >= min && field->value[n] <= max 1555 && field->value[n] - min < field->maxusage 1556 && field->usage[field->value[n] - min].hid 1557 && search(value, field->value[n], count)) 1558 hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt); 1559 1560 if (value[n] >= min && value[n] <= max 1561 && value[n] - min < field->maxusage 1562 && field->usage[value[n] - min].hid 1563 && search(field->value, value[n], count)) 1564 hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt); 1565 } 1566 1567 memcpy(field->value, value, count * sizeof(__s32)); 1568 exit: 1569 kfree(value); 1570 } 1571 1572 /* 1573 * Output the field into the report. 1574 */ 1575 1576 static void hid_output_field(const struct hid_device *hid, 1577 struct hid_field *field, __u8 *data) 1578 { 1579 unsigned count = field->report_count; 1580 unsigned offset = field->report_offset; 1581 unsigned size = field->report_size; 1582 unsigned n; 1583 1584 for (n = 0; n < count; n++) { 1585 if (field->logical_minimum < 0) /* signed values */ 1586 implement(hid, data, offset + n * size, size, 1587 s32ton(field->value[n], size)); 1588 else /* unsigned values */ 1589 implement(hid, data, offset + n * size, size, 1590 field->value[n]); 1591 } 1592 } 1593 1594 /* 1595 * Create a report. 'data' has to be allocated using 1596 * hid_alloc_report_buf() so that it has proper size. 1597 */ 1598 1599 void hid_output_report(struct hid_report *report, __u8 *data) 1600 { 1601 unsigned n; 1602 1603 if (report->id > 0) 1604 *data++ = report->id; 1605 1606 memset(data, 0, ((report->size - 1) >> 3) + 1); 1607 for (n = 0; n < report->maxfield; n++) 1608 hid_output_field(report->device, report->field[n], data); 1609 } 1610 EXPORT_SYMBOL_GPL(hid_output_report); 1611 1612 /* 1613 * Allocator for buffer that is going to be passed to hid_output_report() 1614 */ 1615 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) 1616 { 1617 /* 1618 * 7 extra bytes are necessary to achieve proper functionality 1619 * of implement() working on 8 byte chunks 1620 */ 1621 1622 u32 len = hid_report_len(report) + 7; 1623 1624 return kmalloc(len, flags); 1625 } 1626 EXPORT_SYMBOL_GPL(hid_alloc_report_buf); 1627 1628 /* 1629 * Set a field value. The report this field belongs to has to be 1630 * created and transferred to the device, to set this value in the 1631 * device. 1632 */ 1633 1634 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) 1635 { 1636 unsigned size; 1637 1638 if (!field) 1639 return -1; 1640 1641 size = field->report_size; 1642 1643 hid_dump_input(field->report->device, field->usage + offset, value); 1644 1645 if (offset >= field->report_count) { 1646 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n", 1647 offset, field->report_count); 1648 return -1; 1649 } 1650 if (field->logical_minimum < 0) { 1651 if (value != snto32(s32ton(value, size), size)) { 1652 hid_err(field->report->device, "value %d is out of range\n", value); 1653 return -1; 1654 } 1655 } 1656 field->value[offset] = value; 1657 return 0; 1658 } 1659 EXPORT_SYMBOL_GPL(hid_set_field); 1660 1661 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, 1662 const u8 *data) 1663 { 1664 struct hid_report *report; 1665 unsigned int n = 0; /* Normally report number is 0 */ 1666 1667 /* Device uses numbered reports, data[0] is report number */ 1668 if (report_enum->numbered) 1669 n = *data; 1670 1671 report = report_enum->report_id_hash[n]; 1672 if (report == NULL) 1673 dbg_hid("undefined report_id %u received\n", n); 1674 1675 return report; 1676 } 1677 1678 /* 1679 * Implement a generic .request() callback, using .raw_request() 1680 * DO NOT USE in hid drivers directly, but through hid_hw_request instead. 1681 */ 1682 int __hid_request(struct hid_device *hid, struct hid_report *report, 1683 int reqtype) 1684 { 1685 char *buf; 1686 int ret; 1687 u32 len; 1688 1689 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1690 if (!buf) 1691 return -ENOMEM; 1692 1693 len = hid_report_len(report); 1694 1695 if (reqtype == HID_REQ_SET_REPORT) 1696 hid_output_report(report, buf); 1697 1698 ret = hid->ll_driver->raw_request(hid, report->id, buf, len, 1699 report->type, reqtype); 1700 if (ret < 0) { 1701 dbg_hid("unable to complete request: %d\n", ret); 1702 goto out; 1703 } 1704 1705 if (reqtype == HID_REQ_GET_REPORT) 1706 hid_input_report(hid, report->type, buf, ret, 0); 1707 1708 ret = 0; 1709 1710 out: 1711 kfree(buf); 1712 return ret; 1713 } 1714 EXPORT_SYMBOL_GPL(__hid_request); 1715 1716 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, 1717 int interrupt) 1718 { 1719 struct hid_report_enum *report_enum = hid->report_enum + type; 1720 struct hid_report *report; 1721 struct hid_driver *hdrv; 1722 unsigned int a; 1723 u32 rsize, csize = size; 1724 u8 *cdata = data; 1725 int ret = 0; 1726 1727 report = hid_get_report(report_enum, data); 1728 if (!report) 1729 goto out; 1730 1731 if (report_enum->numbered) { 1732 cdata++; 1733 csize--; 1734 } 1735 1736 rsize = ((report->size - 1) >> 3) + 1; 1737 1738 if (rsize > HID_MAX_BUFFER_SIZE) 1739 rsize = HID_MAX_BUFFER_SIZE; 1740 1741 if (csize < rsize) { 1742 dbg_hid("report %d is too short, (%d < %d)\n", report->id, 1743 csize, rsize); 1744 memset(cdata + csize, 0, rsize - csize); 1745 } 1746 1747 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 1748 hid->hiddev_report_event(hid, report); 1749 if (hid->claimed & HID_CLAIMED_HIDRAW) { 1750 ret = hidraw_report_event(hid, data, size); 1751 if (ret) 1752 goto out; 1753 } 1754 1755 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 1756 for (a = 0; a < report->maxfield; a++) 1757 hid_input_field(hid, report->field[a], cdata, interrupt); 1758 hdrv = hid->driver; 1759 if (hdrv && hdrv->report) 1760 hdrv->report(hid, report); 1761 } 1762 1763 if (hid->claimed & HID_CLAIMED_INPUT) 1764 hidinput_report_event(hid, report); 1765 out: 1766 return ret; 1767 } 1768 EXPORT_SYMBOL_GPL(hid_report_raw_event); 1769 1770 /** 1771 * hid_input_report - report data from lower layer (usb, bt...) 1772 * 1773 * @hid: hid device 1774 * @type: HID report type (HID_*_REPORT) 1775 * @data: report contents 1776 * @size: size of data parameter 1777 * @interrupt: distinguish between interrupt and control transfers 1778 * 1779 * This is data entry for lower layers. 1780 */ 1781 int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt) 1782 { 1783 struct hid_report_enum *report_enum; 1784 struct hid_driver *hdrv; 1785 struct hid_report *report; 1786 int ret = 0; 1787 1788 if (!hid) 1789 return -ENODEV; 1790 1791 if (down_trylock(&hid->driver_input_lock)) 1792 return -EBUSY; 1793 1794 if (!hid->driver) { 1795 ret = -ENODEV; 1796 goto unlock; 1797 } 1798 report_enum = hid->report_enum + type; 1799 hdrv = hid->driver; 1800 1801 if (!size) { 1802 dbg_hid("empty report\n"); 1803 ret = -1; 1804 goto unlock; 1805 } 1806 1807 /* Avoid unnecessary overhead if debugfs is disabled */ 1808 if (!list_empty(&hid->debug_list)) 1809 hid_dump_report(hid, type, data, size); 1810 1811 report = hid_get_report(report_enum, data); 1812 1813 if (!report) { 1814 ret = -1; 1815 goto unlock; 1816 } 1817 1818 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { 1819 ret = hdrv->raw_event(hid, report, data, size); 1820 if (ret < 0) 1821 goto unlock; 1822 } 1823 1824 ret = hid_report_raw_event(hid, type, data, size, interrupt); 1825 1826 unlock: 1827 up(&hid->driver_input_lock); 1828 return ret; 1829 } 1830 EXPORT_SYMBOL_GPL(hid_input_report); 1831 1832 bool hid_match_one_id(const struct hid_device *hdev, 1833 const struct hid_device_id *id) 1834 { 1835 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && 1836 (id->group == HID_GROUP_ANY || id->group == hdev->group) && 1837 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && 1838 (id->product == HID_ANY_ID || id->product == hdev->product); 1839 } 1840 1841 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, 1842 const struct hid_device_id *id) 1843 { 1844 for (; id->bus; id++) 1845 if (hid_match_one_id(hdev, id)) 1846 return id; 1847 1848 return NULL; 1849 } 1850 1851 static const struct hid_device_id hid_hiddev_list[] = { 1852 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) }, 1853 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) }, 1854 { } 1855 }; 1856 1857 static bool hid_hiddev(struct hid_device *hdev) 1858 { 1859 return !!hid_match_id(hdev, hid_hiddev_list); 1860 } 1861 1862 1863 static ssize_t 1864 read_report_descriptor(struct file *filp, struct kobject *kobj, 1865 struct bin_attribute *attr, 1866 char *buf, loff_t off, size_t count) 1867 { 1868 struct device *dev = kobj_to_dev(kobj); 1869 struct hid_device *hdev = to_hid_device(dev); 1870 1871 if (off >= hdev->rsize) 1872 return 0; 1873 1874 if (off + count > hdev->rsize) 1875 count = hdev->rsize - off; 1876 1877 memcpy(buf, hdev->rdesc + off, count); 1878 1879 return count; 1880 } 1881 1882 static ssize_t 1883 show_country(struct device *dev, struct device_attribute *attr, 1884 char *buf) 1885 { 1886 struct hid_device *hdev = to_hid_device(dev); 1887 1888 return sprintf(buf, "%02x\n", hdev->country & 0xff); 1889 } 1890 1891 static struct bin_attribute dev_bin_attr_report_desc = { 1892 .attr = { .name = "report_descriptor", .mode = 0444 }, 1893 .read = read_report_descriptor, 1894 .size = HID_MAX_DESCRIPTOR_SIZE, 1895 }; 1896 1897 static const struct device_attribute dev_attr_country = { 1898 .attr = { .name = "country", .mode = 0444 }, 1899 .show = show_country, 1900 }; 1901 1902 int hid_connect(struct hid_device *hdev, unsigned int connect_mask) 1903 { 1904 static const char *types[] = { "Device", "Pointer", "Mouse", "Device", 1905 "Joystick", "Gamepad", "Keyboard", "Keypad", 1906 "Multi-Axis Controller" 1907 }; 1908 const char *type, *bus; 1909 char buf[64] = ""; 1910 unsigned int i; 1911 int len; 1912 int ret; 1913 1914 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) 1915 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); 1916 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) 1917 connect_mask |= HID_CONNECT_HIDINPUT_FORCE; 1918 if (hdev->bus != BUS_USB) 1919 connect_mask &= ~HID_CONNECT_HIDDEV; 1920 if (hid_hiddev(hdev)) 1921 connect_mask |= HID_CONNECT_HIDDEV_FORCE; 1922 1923 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev, 1924 connect_mask & HID_CONNECT_HIDINPUT_FORCE)) 1925 hdev->claimed |= HID_CLAIMED_INPUT; 1926 1927 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect && 1928 !hdev->hiddev_connect(hdev, 1929 connect_mask & HID_CONNECT_HIDDEV_FORCE)) 1930 hdev->claimed |= HID_CLAIMED_HIDDEV; 1931 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) 1932 hdev->claimed |= HID_CLAIMED_HIDRAW; 1933 1934 if (connect_mask & HID_CONNECT_DRIVER) 1935 hdev->claimed |= HID_CLAIMED_DRIVER; 1936 1937 /* Drivers with the ->raw_event callback set are not required to connect 1938 * to any other listener. */ 1939 if (!hdev->claimed && !hdev->driver->raw_event) { 1940 hid_err(hdev, "device has no listeners, quitting\n"); 1941 return -ENODEV; 1942 } 1943 1944 if ((hdev->claimed & HID_CLAIMED_INPUT) && 1945 (connect_mask & HID_CONNECT_FF) && hdev->ff_init) 1946 hdev->ff_init(hdev); 1947 1948 len = 0; 1949 if (hdev->claimed & HID_CLAIMED_INPUT) 1950 len += sprintf(buf + len, "input"); 1951 if (hdev->claimed & HID_CLAIMED_HIDDEV) 1952 len += sprintf(buf + len, "%shiddev%d", len ? "," : "", 1953 ((struct hiddev *)hdev->hiddev)->minor); 1954 if (hdev->claimed & HID_CLAIMED_HIDRAW) 1955 len += sprintf(buf + len, "%shidraw%d", len ? "," : "", 1956 ((struct hidraw *)hdev->hidraw)->minor); 1957 1958 type = "Device"; 1959 for (i = 0; i < hdev->maxcollection; i++) { 1960 struct hid_collection *col = &hdev->collection[i]; 1961 if (col->type == HID_COLLECTION_APPLICATION && 1962 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK && 1963 (col->usage & 0xffff) < ARRAY_SIZE(types)) { 1964 type = types[col->usage & 0xffff]; 1965 break; 1966 } 1967 } 1968 1969 switch (hdev->bus) { 1970 case BUS_USB: 1971 bus = "USB"; 1972 break; 1973 case BUS_BLUETOOTH: 1974 bus = "BLUETOOTH"; 1975 break; 1976 case BUS_I2C: 1977 bus = "I2C"; 1978 break; 1979 default: 1980 bus = "<UNKNOWN>"; 1981 } 1982 1983 ret = device_create_file(&hdev->dev, &dev_attr_country); 1984 if (ret) 1985 hid_warn(hdev, 1986 "can't create sysfs country code attribute err: %d\n", ret); 1987 1988 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", 1989 buf, bus, hdev->version >> 8, hdev->version & 0xff, 1990 type, hdev->name, hdev->phys); 1991 1992 return 0; 1993 } 1994 EXPORT_SYMBOL_GPL(hid_connect); 1995 1996 void hid_disconnect(struct hid_device *hdev) 1997 { 1998 device_remove_file(&hdev->dev, &dev_attr_country); 1999 if (hdev->claimed & HID_CLAIMED_INPUT) 2000 hidinput_disconnect(hdev); 2001 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2002 hdev->hiddev_disconnect(hdev); 2003 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2004 hidraw_disconnect(hdev); 2005 hdev->claimed = 0; 2006 } 2007 EXPORT_SYMBOL_GPL(hid_disconnect); 2008 2009 /** 2010 * hid_hw_start - start underlying HW 2011 * @hdev: hid device 2012 * @connect_mask: which outputs to connect, see HID_CONNECT_* 2013 * 2014 * Call this in probe function *after* hid_parse. This will setup HW 2015 * buffers and start the device (if not defeirred to device open). 2016 * hid_hw_stop must be called if this was successful. 2017 */ 2018 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask) 2019 { 2020 int error; 2021 2022 error = hdev->ll_driver->start(hdev); 2023 if (error) 2024 return error; 2025 2026 if (connect_mask) { 2027 error = hid_connect(hdev, connect_mask); 2028 if (error) { 2029 hdev->ll_driver->stop(hdev); 2030 return error; 2031 } 2032 } 2033 2034 return 0; 2035 } 2036 EXPORT_SYMBOL_GPL(hid_hw_start); 2037 2038 /** 2039 * hid_hw_stop - stop underlying HW 2040 * @hdev: hid device 2041 * 2042 * This is usually called from remove function or from probe when something 2043 * failed and hid_hw_start was called already. 2044 */ 2045 void hid_hw_stop(struct hid_device *hdev) 2046 { 2047 hid_disconnect(hdev); 2048 hdev->ll_driver->stop(hdev); 2049 } 2050 EXPORT_SYMBOL_GPL(hid_hw_stop); 2051 2052 /** 2053 * hid_hw_open - signal underlying HW to start delivering events 2054 * @hdev: hid device 2055 * 2056 * Tell underlying HW to start delivering events from the device. 2057 * This function should be called sometime after successful call 2058 * to hid_hw_start(). 2059 */ 2060 int hid_hw_open(struct hid_device *hdev) 2061 { 2062 int ret; 2063 2064 ret = mutex_lock_killable(&hdev->ll_open_lock); 2065 if (ret) 2066 return ret; 2067 2068 if (!hdev->ll_open_count++) { 2069 ret = hdev->ll_driver->open(hdev); 2070 if (ret) 2071 hdev->ll_open_count--; 2072 } 2073 2074 mutex_unlock(&hdev->ll_open_lock); 2075 return ret; 2076 } 2077 EXPORT_SYMBOL_GPL(hid_hw_open); 2078 2079 /** 2080 * hid_hw_close - signal underlaying HW to stop delivering events 2081 * 2082 * @hdev: hid device 2083 * 2084 * This function indicates that we are not interested in the events 2085 * from this device anymore. Delivery of events may or may not stop, 2086 * depending on the number of users still outstanding. 2087 */ 2088 void hid_hw_close(struct hid_device *hdev) 2089 { 2090 mutex_lock(&hdev->ll_open_lock); 2091 if (!--hdev->ll_open_count) 2092 hdev->ll_driver->close(hdev); 2093 mutex_unlock(&hdev->ll_open_lock); 2094 } 2095 EXPORT_SYMBOL_GPL(hid_hw_close); 2096 2097 struct hid_dynid { 2098 struct list_head list; 2099 struct hid_device_id id; 2100 }; 2101 2102 /** 2103 * store_new_id - add a new HID device ID to this driver and re-probe devices 2104 * @driver: target device driver 2105 * @buf: buffer for scanning device ID data 2106 * @count: input size 2107 * 2108 * Adds a new dynamic hid device ID to this driver, 2109 * and causes the driver to probe for all devices again. 2110 */ 2111 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 2112 size_t count) 2113 { 2114 struct hid_driver *hdrv = to_hid_driver(drv); 2115 struct hid_dynid *dynid; 2116 __u32 bus, vendor, product; 2117 unsigned long driver_data = 0; 2118 int ret; 2119 2120 ret = sscanf(buf, "%x %x %x %lx", 2121 &bus, &vendor, &product, &driver_data); 2122 if (ret < 3) 2123 return -EINVAL; 2124 2125 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 2126 if (!dynid) 2127 return -ENOMEM; 2128 2129 dynid->id.bus = bus; 2130 dynid->id.group = HID_GROUP_ANY; 2131 dynid->id.vendor = vendor; 2132 dynid->id.product = product; 2133 dynid->id.driver_data = driver_data; 2134 2135 spin_lock(&hdrv->dyn_lock); 2136 list_add_tail(&dynid->list, &hdrv->dyn_list); 2137 spin_unlock(&hdrv->dyn_lock); 2138 2139 ret = driver_attach(&hdrv->driver); 2140 2141 return ret ? : count; 2142 } 2143 static DRIVER_ATTR_WO(new_id); 2144 2145 static struct attribute *hid_drv_attrs[] = { 2146 &driver_attr_new_id.attr, 2147 NULL, 2148 }; 2149 ATTRIBUTE_GROUPS(hid_drv); 2150 2151 static void hid_free_dynids(struct hid_driver *hdrv) 2152 { 2153 struct hid_dynid *dynid, *n; 2154 2155 spin_lock(&hdrv->dyn_lock); 2156 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { 2157 list_del(&dynid->list); 2158 kfree(dynid); 2159 } 2160 spin_unlock(&hdrv->dyn_lock); 2161 } 2162 2163 const struct hid_device_id *hid_match_device(struct hid_device *hdev, 2164 struct hid_driver *hdrv) 2165 { 2166 struct hid_dynid *dynid; 2167 2168 spin_lock(&hdrv->dyn_lock); 2169 list_for_each_entry(dynid, &hdrv->dyn_list, list) { 2170 if (hid_match_one_id(hdev, &dynid->id)) { 2171 spin_unlock(&hdrv->dyn_lock); 2172 return &dynid->id; 2173 } 2174 } 2175 spin_unlock(&hdrv->dyn_lock); 2176 2177 return hid_match_id(hdev, hdrv->id_table); 2178 } 2179 EXPORT_SYMBOL_GPL(hid_match_device); 2180 2181 static int hid_bus_match(struct device *dev, struct device_driver *drv) 2182 { 2183 struct hid_driver *hdrv = to_hid_driver(drv); 2184 struct hid_device *hdev = to_hid_device(dev); 2185 2186 return hid_match_device(hdev, hdrv) != NULL; 2187 } 2188 2189 /** 2190 * hid_compare_device_paths - check if both devices share the same path 2191 * @hdev_a: hid device 2192 * @hdev_b: hid device 2193 * @separator: char to use as separator 2194 * 2195 * Check if two devices share the same path up to the last occurrence of 2196 * the separator char. Both paths must exist (i.e., zero-length paths 2197 * don't match). 2198 */ 2199 bool hid_compare_device_paths(struct hid_device *hdev_a, 2200 struct hid_device *hdev_b, char separator) 2201 { 2202 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys; 2203 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys; 2204 2205 if (n1 != n2 || n1 <= 0 || n2 <= 0) 2206 return false; 2207 2208 return !strncmp(hdev_a->phys, hdev_b->phys, n1); 2209 } 2210 EXPORT_SYMBOL_GPL(hid_compare_device_paths); 2211 2212 static int hid_device_probe(struct device *dev) 2213 { 2214 struct hid_driver *hdrv = to_hid_driver(dev->driver); 2215 struct hid_device *hdev = to_hid_device(dev); 2216 const struct hid_device_id *id; 2217 int ret = 0; 2218 2219 if (down_interruptible(&hdev->driver_input_lock)) { 2220 ret = -EINTR; 2221 goto end; 2222 } 2223 hdev->io_started = false; 2224 2225 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); 2226 2227 if (!hdev->driver) { 2228 id = hid_match_device(hdev, hdrv); 2229 if (id == NULL) { 2230 ret = -ENODEV; 2231 goto unlock; 2232 } 2233 2234 if (hdrv->match) { 2235 if (!hdrv->match(hdev, hid_ignore_special_drivers)) { 2236 ret = -ENODEV; 2237 goto unlock; 2238 } 2239 } else { 2240 /* 2241 * hid-generic implements .match(), so if 2242 * hid_ignore_special_drivers is set, we can safely 2243 * return. 2244 */ 2245 if (hid_ignore_special_drivers) { 2246 ret = -ENODEV; 2247 goto unlock; 2248 } 2249 } 2250 2251 /* reset the quirks that has been previously set */ 2252 hdev->quirks = hid_lookup_quirk(hdev); 2253 hdev->driver = hdrv; 2254 if (hdrv->probe) { 2255 ret = hdrv->probe(hdev, id); 2256 } else { /* default probe */ 2257 ret = hid_open_report(hdev); 2258 if (!ret) 2259 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 2260 } 2261 if (ret) { 2262 hid_close_report(hdev); 2263 hdev->driver = NULL; 2264 } 2265 } 2266 unlock: 2267 if (!hdev->io_started) 2268 up(&hdev->driver_input_lock); 2269 end: 2270 return ret; 2271 } 2272 2273 static int hid_device_remove(struct device *dev) 2274 { 2275 struct hid_device *hdev = to_hid_device(dev); 2276 struct hid_driver *hdrv; 2277 int ret = 0; 2278 2279 if (down_interruptible(&hdev->driver_input_lock)) { 2280 ret = -EINTR; 2281 goto end; 2282 } 2283 hdev->io_started = false; 2284 2285 hdrv = hdev->driver; 2286 if (hdrv) { 2287 if (hdrv->remove) 2288 hdrv->remove(hdev); 2289 else /* default remove */ 2290 hid_hw_stop(hdev); 2291 hid_close_report(hdev); 2292 hdev->driver = NULL; 2293 } 2294 2295 if (!hdev->io_started) 2296 up(&hdev->driver_input_lock); 2297 end: 2298 return ret; 2299 } 2300 2301 static ssize_t modalias_show(struct device *dev, struct device_attribute *a, 2302 char *buf) 2303 { 2304 struct hid_device *hdev = container_of(dev, struct hid_device, dev); 2305 2306 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n", 2307 hdev->bus, hdev->group, hdev->vendor, hdev->product); 2308 } 2309 static DEVICE_ATTR_RO(modalias); 2310 2311 static struct attribute *hid_dev_attrs[] = { 2312 &dev_attr_modalias.attr, 2313 NULL, 2314 }; 2315 static struct bin_attribute *hid_dev_bin_attrs[] = { 2316 &dev_bin_attr_report_desc, 2317 NULL 2318 }; 2319 static const struct attribute_group hid_dev_group = { 2320 .attrs = hid_dev_attrs, 2321 .bin_attrs = hid_dev_bin_attrs, 2322 }; 2323 __ATTRIBUTE_GROUPS(hid_dev); 2324 2325 static int hid_uevent(struct device *dev, struct kobj_uevent_env *env) 2326 { 2327 struct hid_device *hdev = to_hid_device(dev); 2328 2329 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X", 2330 hdev->bus, hdev->vendor, hdev->product)) 2331 return -ENOMEM; 2332 2333 if (add_uevent_var(env, "HID_NAME=%s", hdev->name)) 2334 return -ENOMEM; 2335 2336 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys)) 2337 return -ENOMEM; 2338 2339 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq)) 2340 return -ENOMEM; 2341 2342 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X", 2343 hdev->bus, hdev->group, hdev->vendor, hdev->product)) 2344 return -ENOMEM; 2345 2346 return 0; 2347 } 2348 2349 struct bus_type hid_bus_type = { 2350 .name = "hid", 2351 .dev_groups = hid_dev_groups, 2352 .drv_groups = hid_drv_groups, 2353 .match = hid_bus_match, 2354 .probe = hid_device_probe, 2355 .remove = hid_device_remove, 2356 .uevent = hid_uevent, 2357 }; 2358 EXPORT_SYMBOL(hid_bus_type); 2359 2360 int hid_add_device(struct hid_device *hdev) 2361 { 2362 static atomic_t id = ATOMIC_INIT(0); 2363 int ret; 2364 2365 if (WARN_ON(hdev->status & HID_STAT_ADDED)) 2366 return -EBUSY; 2367 2368 hdev->quirks = hid_lookup_quirk(hdev); 2369 2370 /* we need to kill them here, otherwise they will stay allocated to 2371 * wait for coming driver */ 2372 if (hid_ignore(hdev)) 2373 return -ENODEV; 2374 2375 /* 2376 * Check for the mandatory transport channel. 2377 */ 2378 if (!hdev->ll_driver->raw_request) { 2379 hid_err(hdev, "transport driver missing .raw_request()\n"); 2380 return -EINVAL; 2381 } 2382 2383 /* 2384 * Read the device report descriptor once and use as template 2385 * for the driver-specific modifications. 2386 */ 2387 ret = hdev->ll_driver->parse(hdev); 2388 if (ret) 2389 return ret; 2390 if (!hdev->dev_rdesc) 2391 return -ENODEV; 2392 2393 /* 2394 * Scan generic devices for group information 2395 */ 2396 if (hid_ignore_special_drivers) { 2397 hdev->group = HID_GROUP_GENERIC; 2398 } else if (!hdev->group && 2399 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) { 2400 ret = hid_scan_report(hdev); 2401 if (ret) 2402 hid_warn(hdev, "bad device descriptor (%d)\n", ret); 2403 } 2404 2405 /* XXX hack, any other cleaner solution after the driver core 2406 * is converted to allow more than 20 bytes as the device name? */ 2407 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, 2408 hdev->vendor, hdev->product, atomic_inc_return(&id)); 2409 2410 hid_debug_register(hdev, dev_name(&hdev->dev)); 2411 ret = device_add(&hdev->dev); 2412 if (!ret) 2413 hdev->status |= HID_STAT_ADDED; 2414 else 2415 hid_debug_unregister(hdev); 2416 2417 return ret; 2418 } 2419 EXPORT_SYMBOL_GPL(hid_add_device); 2420 2421 /** 2422 * hid_allocate_device - allocate new hid device descriptor 2423 * 2424 * Allocate and initialize hid device, so that hid_destroy_device might be 2425 * used to free it. 2426 * 2427 * New hid_device pointer is returned on success, otherwise ERR_PTR encoded 2428 * error value. 2429 */ 2430 struct hid_device *hid_allocate_device(void) 2431 { 2432 struct hid_device *hdev; 2433 int ret = -ENOMEM; 2434 2435 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); 2436 if (hdev == NULL) 2437 return ERR_PTR(ret); 2438 2439 device_initialize(&hdev->dev); 2440 hdev->dev.release = hid_device_release; 2441 hdev->dev.bus = &hid_bus_type; 2442 device_enable_async_suspend(&hdev->dev); 2443 2444 hid_close_report(hdev); 2445 2446 init_waitqueue_head(&hdev->debug_wait); 2447 INIT_LIST_HEAD(&hdev->debug_list); 2448 spin_lock_init(&hdev->debug_list_lock); 2449 sema_init(&hdev->driver_input_lock, 1); 2450 mutex_init(&hdev->ll_open_lock); 2451 2452 return hdev; 2453 } 2454 EXPORT_SYMBOL_GPL(hid_allocate_device); 2455 2456 static void hid_remove_device(struct hid_device *hdev) 2457 { 2458 if (hdev->status & HID_STAT_ADDED) { 2459 device_del(&hdev->dev); 2460 hid_debug_unregister(hdev); 2461 hdev->status &= ~HID_STAT_ADDED; 2462 } 2463 kfree(hdev->dev_rdesc); 2464 hdev->dev_rdesc = NULL; 2465 hdev->dev_rsize = 0; 2466 } 2467 2468 /** 2469 * hid_destroy_device - free previously allocated device 2470 * 2471 * @hdev: hid device 2472 * 2473 * If you allocate hid_device through hid_allocate_device, you should ever 2474 * free by this function. 2475 */ 2476 void hid_destroy_device(struct hid_device *hdev) 2477 { 2478 hid_remove_device(hdev); 2479 put_device(&hdev->dev); 2480 } 2481 EXPORT_SYMBOL_GPL(hid_destroy_device); 2482 2483 2484 static int __hid_bus_reprobe_drivers(struct device *dev, void *data) 2485 { 2486 struct hid_driver *hdrv = data; 2487 struct hid_device *hdev = to_hid_device(dev); 2488 2489 if (hdev->driver == hdrv && 2490 !hdrv->match(hdev, hid_ignore_special_drivers) && 2491 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) 2492 return device_reprobe(dev); 2493 2494 return 0; 2495 } 2496 2497 static int __hid_bus_driver_added(struct device_driver *drv, void *data) 2498 { 2499 struct hid_driver *hdrv = to_hid_driver(drv); 2500 2501 if (hdrv->match) { 2502 bus_for_each_dev(&hid_bus_type, NULL, hdrv, 2503 __hid_bus_reprobe_drivers); 2504 } 2505 2506 return 0; 2507 } 2508 2509 static int __bus_removed_driver(struct device_driver *drv, void *data) 2510 { 2511 return bus_rescan_devices(&hid_bus_type); 2512 } 2513 2514 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner, 2515 const char *mod_name) 2516 { 2517 int ret; 2518 2519 hdrv->driver.name = hdrv->name; 2520 hdrv->driver.bus = &hid_bus_type; 2521 hdrv->driver.owner = owner; 2522 hdrv->driver.mod_name = mod_name; 2523 2524 INIT_LIST_HEAD(&hdrv->dyn_list); 2525 spin_lock_init(&hdrv->dyn_lock); 2526 2527 ret = driver_register(&hdrv->driver); 2528 2529 if (ret == 0) 2530 bus_for_each_drv(&hid_bus_type, NULL, NULL, 2531 __hid_bus_driver_added); 2532 2533 return ret; 2534 } 2535 EXPORT_SYMBOL_GPL(__hid_register_driver); 2536 2537 void hid_unregister_driver(struct hid_driver *hdrv) 2538 { 2539 driver_unregister(&hdrv->driver); 2540 hid_free_dynids(hdrv); 2541 2542 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver); 2543 } 2544 EXPORT_SYMBOL_GPL(hid_unregister_driver); 2545 2546 int hid_check_keys_pressed(struct hid_device *hid) 2547 { 2548 struct hid_input *hidinput; 2549 int i; 2550 2551 if (!(hid->claimed & HID_CLAIMED_INPUT)) 2552 return 0; 2553 2554 list_for_each_entry(hidinput, &hid->inputs, list) { 2555 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) 2556 if (hidinput->input->key[i]) 2557 return 1; 2558 } 2559 2560 return 0; 2561 } 2562 2563 EXPORT_SYMBOL_GPL(hid_check_keys_pressed); 2564 2565 static int __init hid_init(void) 2566 { 2567 int ret; 2568 2569 if (hid_debug) 2570 pr_warn("hid_debug is now used solely for parser and driver debugging.\n" 2571 "debugfs is now used for inspecting the device (report descriptor, reports)\n"); 2572 2573 ret = bus_register(&hid_bus_type); 2574 if (ret) { 2575 pr_err("can't register hid bus\n"); 2576 goto err; 2577 } 2578 2579 ret = hidraw_init(); 2580 if (ret) 2581 goto err_bus; 2582 2583 hid_debug_init(); 2584 2585 return 0; 2586 err_bus: 2587 bus_unregister(&hid_bus_type); 2588 err: 2589 return ret; 2590 } 2591 2592 static void __exit hid_exit(void) 2593 { 2594 hid_debug_exit(); 2595 hidraw_exit(); 2596 bus_unregister(&hid_bus_type); 2597 hid_quirks_exit(HID_BUS_ANY); 2598 } 2599 2600 module_init(hid_init); 2601 module_exit(hid_exit); 2602 2603 MODULE_AUTHOR("Andreas Gal"); 2604 MODULE_AUTHOR("Vojtech Pavlik"); 2605 MODULE_AUTHOR("Jiri Kosina"); 2606 MODULE_LICENSE("GPL"); 2607