1 #include <linux/kernel.h> 2 #include <linux/errno.h> 3 #include <linux/init.h> 4 #include <linux/slab.h> 5 #include <linux/mm.h> 6 #include <linux/module.h> 7 #include <linux/moduleparam.h> 8 #include <linux/scatterlist.h> 9 #include <linux/mutex.h> 10 #include <linux/timer.h> 11 #include <linux/usb.h> 12 13 #define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */ 14 15 /*-------------------------------------------------------------------------*/ 16 17 static int override_alt = -1; 18 module_param_named(alt, override_alt, int, 0644); 19 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection"); 20 static void complicated_callback(struct urb *urb); 21 22 /*-------------------------------------------------------------------------*/ 23 24 /* FIXME make these public somewhere; usbdevfs.h? */ 25 26 /* Parameter for usbtest driver. */ 27 struct usbtest_param_32 { 28 /* inputs */ 29 __u32 test_num; /* 0..(TEST_CASES-1) */ 30 __u32 iterations; 31 __u32 length; 32 __u32 vary; 33 __u32 sglen; 34 35 /* outputs */ 36 __s32 duration_sec; 37 __s32 duration_usec; 38 }; 39 40 /* 41 * Compat parameter to the usbtest driver. 42 * This supports older user space binaries compiled with 64 bit compiler. 43 */ 44 struct usbtest_param_64 { 45 /* inputs */ 46 __u32 test_num; /* 0..(TEST_CASES-1) */ 47 __u32 iterations; 48 __u32 length; 49 __u32 vary; 50 __u32 sglen; 51 52 /* outputs */ 53 __s64 duration_sec; 54 __s64 duration_usec; 55 }; 56 57 /* IOCTL interface to the driver. */ 58 #define USBTEST_REQUEST_32 _IOWR('U', 100, struct usbtest_param_32) 59 /* COMPAT IOCTL interface to the driver. */ 60 #define USBTEST_REQUEST_64 _IOWR('U', 100, struct usbtest_param_64) 61 62 /*-------------------------------------------------------------------------*/ 63 64 #define GENERIC /* let probe() bind using module params */ 65 66 /* Some devices that can be used for testing will have "real" drivers. 67 * Entries for those need to be enabled here by hand, after disabling 68 * that "real" driver. 69 */ 70 //#define IBOT2 /* grab iBOT2 webcams */ 71 //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */ 72 73 /*-------------------------------------------------------------------------*/ 74 75 struct usbtest_info { 76 const char *name; 77 u8 ep_in; /* bulk/intr source */ 78 u8 ep_out; /* bulk/intr sink */ 79 unsigned autoconf:1; 80 unsigned ctrl_out:1; 81 unsigned iso:1; /* try iso in/out */ 82 unsigned intr:1; /* try interrupt in/out */ 83 int alt; 84 }; 85 86 /* this is accessed only through usbfs ioctl calls. 87 * one ioctl to issue a test ... one lock per device. 88 * tests create other threads if they need them. 89 * urbs and buffers are allocated dynamically, 90 * and data generated deterministically. 91 */ 92 struct usbtest_dev { 93 struct usb_interface *intf; 94 struct usbtest_info *info; 95 int in_pipe; 96 int out_pipe; 97 int in_iso_pipe; 98 int out_iso_pipe; 99 int in_int_pipe; 100 int out_int_pipe; 101 struct usb_endpoint_descriptor *iso_in, *iso_out; 102 struct usb_endpoint_descriptor *int_in, *int_out; 103 struct mutex lock; 104 105 #define TBUF_SIZE 256 106 u8 *buf; 107 }; 108 109 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test) 110 { 111 return interface_to_usbdev(test->intf); 112 } 113 114 /* set up all urbs so they can be used with either bulk or interrupt */ 115 #define INTERRUPT_RATE 1 /* msec/transfer */ 116 117 #define ERROR(tdev, fmt, args...) \ 118 dev_err(&(tdev)->intf->dev , fmt , ## args) 119 #define WARNING(tdev, fmt, args...) 
\ 120 dev_warn(&(tdev)->intf->dev , fmt , ## args) 121 122 #define GUARD_BYTE 0xA5 123 #define MAX_SGLEN 128 124 125 /*-------------------------------------------------------------------------*/ 126 127 static inline void endpoint_update(int edi, 128 struct usb_host_endpoint **in, 129 struct usb_host_endpoint **out, 130 struct usb_host_endpoint *e) 131 { 132 if (edi) { 133 if (!*in) 134 *in = e; 135 } else { 136 if (!*out) 137 *out = e; 138 } 139 } 140 141 static int 142 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf) 143 { 144 int tmp; 145 struct usb_host_interface *alt; 146 struct usb_host_endpoint *in, *out; 147 struct usb_host_endpoint *iso_in, *iso_out; 148 struct usb_host_endpoint *int_in, *int_out; 149 struct usb_device *udev; 150 151 for (tmp = 0; tmp < intf->num_altsetting; tmp++) { 152 unsigned ep; 153 154 in = out = NULL; 155 iso_in = iso_out = NULL; 156 int_in = int_out = NULL; 157 alt = intf->altsetting + tmp; 158 159 if (override_alt >= 0 && 160 override_alt != alt->desc.bAlternateSetting) 161 continue; 162 163 /* take the first altsetting with in-bulk + out-bulk; 164 * ignore other endpoints and altsettings. 165 */ 166 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { 167 struct usb_host_endpoint *e; 168 int edi; 169 170 e = alt->endpoint + ep; 171 edi = usb_endpoint_dir_in(&e->desc); 172 173 switch (usb_endpoint_type(&e->desc)) { 174 case USB_ENDPOINT_XFER_BULK: 175 endpoint_update(edi, &in, &out, e); 176 continue; 177 case USB_ENDPOINT_XFER_INT: 178 if (dev->info->intr) 179 endpoint_update(edi, &int_in, &int_out, e); 180 continue; 181 case USB_ENDPOINT_XFER_ISOC: 182 if (dev->info->iso) 183 endpoint_update(edi, &iso_in, &iso_out, e); 184 /* FALLTHROUGH */ 185 default: 186 continue; 187 } 188 } 189 if ((in && out) || iso_in || iso_out || int_in || int_out) 190 goto found; 191 } 192 return -EINVAL; 193 194 found: 195 udev = testdev_to_usbdev(dev); 196 dev->info->alt = alt->desc.bAlternateSetting; 197 if (alt->desc.bAlternateSetting != 0) { 198 tmp = usb_set_interface(udev, 199 alt->desc.bInterfaceNumber, 200 alt->desc.bAlternateSetting); 201 if (tmp < 0) 202 return tmp; 203 } 204 205 if (in) 206 dev->in_pipe = usb_rcvbulkpipe(udev, 207 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 208 if (out) 209 dev->out_pipe = usb_sndbulkpipe(udev, 210 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 211 212 if (iso_in) { 213 dev->iso_in = &iso_in->desc; 214 dev->in_iso_pipe = usb_rcvisocpipe(udev, 215 iso_in->desc.bEndpointAddress 216 & USB_ENDPOINT_NUMBER_MASK); 217 } 218 219 if (iso_out) { 220 dev->iso_out = &iso_out->desc; 221 dev->out_iso_pipe = usb_sndisocpipe(udev, 222 iso_out->desc.bEndpointAddress 223 & USB_ENDPOINT_NUMBER_MASK); 224 } 225 226 if (int_in) { 227 dev->int_in = &int_in->desc; 228 dev->in_int_pipe = usb_rcvintpipe(udev, 229 int_in->desc.bEndpointAddress 230 & USB_ENDPOINT_NUMBER_MASK); 231 } 232 233 if (int_out) { 234 dev->int_out = &int_out->desc; 235 dev->out_int_pipe = usb_sndintpipe(udev, 236 int_out->desc.bEndpointAddress 237 & USB_ENDPOINT_NUMBER_MASK); 238 } 239 return 0; 240 } 241 242 /*-------------------------------------------------------------------------*/ 243 244 /* Support for testing basic non-queued I/O streams. 245 * 246 * These just package urbs as requests that can be easily canceled. 247 * Each urb's data buffer is dynamically allocated; callers can fill 248 * them with non-zero test data (or test for it) when appropriate. 
249 */ 250 251 static void simple_callback(struct urb *urb) 252 { 253 complete(urb->context); 254 } 255 256 static struct urb *usbtest_alloc_urb( 257 struct usb_device *udev, 258 int pipe, 259 unsigned long bytes, 260 unsigned transfer_flags, 261 unsigned offset, 262 u8 bInterval, 263 usb_complete_t complete_fn) 264 { 265 struct urb *urb; 266 267 urb = usb_alloc_urb(0, GFP_KERNEL); 268 if (!urb) 269 return urb; 270 271 if (bInterval) 272 usb_fill_int_urb(urb, udev, pipe, NULL, bytes, complete_fn, 273 NULL, bInterval); 274 else 275 usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, complete_fn, 276 NULL); 277 278 urb->interval = (udev->speed == USB_SPEED_HIGH) 279 ? (INTERRUPT_RATE << 3) 280 : INTERRUPT_RATE; 281 urb->transfer_flags = transfer_flags; 282 if (usb_pipein(pipe)) 283 urb->transfer_flags |= URB_SHORT_NOT_OK; 284 285 if ((bytes + offset) == 0) 286 return urb; 287 288 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 289 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset, 290 GFP_KERNEL, &urb->transfer_dma); 291 else 292 urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL); 293 294 if (!urb->transfer_buffer) { 295 usb_free_urb(urb); 296 return NULL; 297 } 298 299 /* To test unaligned transfers add an offset and fill the 300 unused memory with a guard value */ 301 if (offset) { 302 memset(urb->transfer_buffer, GUARD_BYTE, offset); 303 urb->transfer_buffer += offset; 304 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 305 urb->transfer_dma += offset; 306 } 307 308 /* For inbound transfers use guard byte so that test fails if 309 data not correctly copied */ 310 memset(urb->transfer_buffer, 311 usb_pipein(urb->pipe) ? GUARD_BYTE : 0, 312 bytes); 313 return urb; 314 } 315 316 static struct urb *simple_alloc_urb( 317 struct usb_device *udev, 318 int pipe, 319 unsigned long bytes, 320 u8 bInterval) 321 { 322 return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0, 323 bInterval, simple_callback); 324 } 325 326 static struct urb *complicated_alloc_urb( 327 struct usb_device *udev, 328 int pipe, 329 unsigned long bytes, 330 u8 bInterval) 331 { 332 return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0, 333 bInterval, complicated_callback); 334 } 335 336 static unsigned pattern; 337 static unsigned mod_pattern; 338 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR); 339 MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)"); 340 341 static unsigned get_maxpacket(struct usb_device *udev, int pipe) 342 { 343 struct usb_host_endpoint *ep; 344 345 ep = usb_pipe_endpoint(udev, pipe); 346 return le16_to_cpup(&ep->desc.wMaxPacketSize); 347 } 348 349 static void simple_fill_buf(struct urb *urb) 350 { 351 unsigned i; 352 u8 *buf = urb->transfer_buffer; 353 unsigned len = urb->transfer_buffer_length; 354 unsigned maxpacket; 355 356 switch (pattern) { 357 default: 358 /* FALLTHROUGH */ 359 case 0: 360 memset(buf, 0, len); 361 break; 362 case 1: /* mod63 */ 363 maxpacket = get_maxpacket(urb->dev, urb->pipe); 364 for (i = 0; i < len; i++) 365 *buf++ = (u8) ((i % maxpacket) % 63); 366 break; 367 } 368 } 369 370 static inline unsigned long buffer_offset(void *buf) 371 { 372 return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1); 373 } 374 375 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb) 376 { 377 u8 *buf = urb->transfer_buffer; 378 u8 *guard = buf - buffer_offset(buf); 379 unsigned i; 380 381 for (i = 0; guard < buf; i++, guard++) { 382 if (*guard != GUARD_BYTE) { 383 ERROR(tdev, "guard byte[%d] %d (not 
%d)\n", 384 i, *guard, GUARD_BYTE); 385 return -EINVAL; 386 } 387 } 388 return 0; 389 } 390 391 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb) 392 { 393 unsigned i; 394 u8 expected; 395 u8 *buf = urb->transfer_buffer; 396 unsigned len = urb->actual_length; 397 unsigned maxpacket = get_maxpacket(urb->dev, urb->pipe); 398 399 int ret = check_guard_bytes(tdev, urb); 400 if (ret) 401 return ret; 402 403 for (i = 0; i < len; i++, buf++) { 404 switch (pattern) { 405 /* all-zeroes has no synchronization issues */ 406 case 0: 407 expected = 0; 408 break; 409 /* mod63 stays in sync with short-terminated transfers, 410 * or otherwise when host and gadget agree on how large 411 * each usb transfer request should be. resync is done 412 * with set_interface or set_config. 413 */ 414 case 1: /* mod63 */ 415 expected = (i % maxpacket) % 63; 416 break; 417 /* always fail unsupported patterns */ 418 default: 419 expected = !*buf; 420 break; 421 } 422 if (*buf == expected) 423 continue; 424 ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected); 425 return -EINVAL; 426 } 427 return 0; 428 } 429 430 static void simple_free_urb(struct urb *urb) 431 { 432 unsigned long offset = buffer_offset(urb->transfer_buffer); 433 434 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 435 usb_free_coherent( 436 urb->dev, 437 urb->transfer_buffer_length + offset, 438 urb->transfer_buffer - offset, 439 urb->transfer_dma - offset); 440 else 441 kfree(urb->transfer_buffer - offset); 442 usb_free_urb(urb); 443 } 444 445 static int simple_io( 446 struct usbtest_dev *tdev, 447 struct urb *urb, 448 int iterations, 449 int vary, 450 int expected, 451 const char *label 452 ) 453 { 454 struct usb_device *udev = urb->dev; 455 int max = urb->transfer_buffer_length; 456 struct completion completion; 457 int retval = 0; 458 unsigned long expire; 459 460 urb->context = &completion; 461 while (retval == 0 && iterations-- > 0) { 462 init_completion(&completion); 463 if (usb_pipeout(urb->pipe)) { 464 simple_fill_buf(urb); 465 urb->transfer_flags |= URB_ZERO_PACKET; 466 } 467 retval = usb_submit_urb(urb, GFP_KERNEL); 468 if (retval != 0) 469 break; 470 471 expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT); 472 if (!wait_for_completion_timeout(&completion, expire)) { 473 usb_kill_urb(urb); 474 retval = (urb->status == -ENOENT ? 475 -ETIMEDOUT : urb->status); 476 } else { 477 retval = urb->status; 478 } 479 480 urb->dev = udev; 481 if (retval == 0 && usb_pipein(urb->pipe)) 482 retval = simple_check_buf(tdev, urb); 483 484 if (vary) { 485 int len = urb->transfer_buffer_length; 486 487 len += vary; 488 len %= max; 489 if (len == 0) 490 len = (vary < max) ? vary : max; 491 urb->transfer_buffer_length = len; 492 } 493 494 /* FIXME if endpoint halted, clear halt (and log) */ 495 } 496 urb->transfer_buffer_length = max; 497 498 if (expected != retval) 499 dev_err(&udev->dev, 500 "%s failed, iterations left %d, status %d (not %d)\n", 501 label, iterations, retval, expected); 502 return retval; 503 } 504 505 506 /*-------------------------------------------------------------------------*/ 507 508 /* We use scatterlist primitives to test queued I/O. 509 * Yes, this also tests the scatterlist primitives. 
510 */ 511 512 static void free_sglist(struct scatterlist *sg, int nents) 513 { 514 unsigned i; 515 516 if (!sg) 517 return; 518 for (i = 0; i < nents; i++) { 519 if (!sg_page(&sg[i])) 520 continue; 521 kfree(sg_virt(&sg[i])); 522 } 523 kfree(sg); 524 } 525 526 static struct scatterlist * 527 alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe) 528 { 529 struct scatterlist *sg; 530 unsigned int n_size = 0; 531 unsigned i; 532 unsigned size = max; 533 unsigned maxpacket = 534 get_maxpacket(interface_to_usbdev(dev->intf), pipe); 535 536 if (max == 0) 537 return NULL; 538 539 sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL); 540 if (!sg) 541 return NULL; 542 sg_init_table(sg, nents); 543 544 for (i = 0; i < nents; i++) { 545 char *buf; 546 unsigned j; 547 548 buf = kzalloc(size, GFP_KERNEL); 549 if (!buf) { 550 free_sglist(sg, i); 551 return NULL; 552 } 553 554 /* kmalloc pages are always physically contiguous! */ 555 sg_set_buf(&sg[i], buf, size); 556 557 switch (pattern) { 558 case 0: 559 /* already zeroed */ 560 break; 561 case 1: 562 for (j = 0; j < size; j++) 563 *buf++ = (u8) (((j + n_size) % maxpacket) % 63); 564 n_size += size; 565 break; 566 } 567 568 if (vary) { 569 size += vary; 570 size %= max; 571 if (size == 0) 572 size = (vary < max) ? vary : max; 573 } 574 } 575 576 return sg; 577 } 578 579 struct sg_timeout { 580 struct timer_list timer; 581 struct usb_sg_request *req; 582 }; 583 584 static void sg_timeout(struct timer_list *t) 585 { 586 struct sg_timeout *timeout = from_timer(timeout, t, timer); 587 588 usb_sg_cancel(timeout->req); 589 } 590 591 static int perform_sglist( 592 struct usbtest_dev *tdev, 593 unsigned iterations, 594 int pipe, 595 struct usb_sg_request *req, 596 struct scatterlist *sg, 597 int nents 598 ) 599 { 600 struct usb_device *udev = testdev_to_usbdev(tdev); 601 int retval = 0; 602 struct sg_timeout timeout = { 603 .req = req, 604 }; 605 606 timer_setup_on_stack(&timeout.timer, sg_timeout, 0); 607 608 while (retval == 0 && iterations-- > 0) { 609 retval = usb_sg_init(req, udev, pipe, 610 (udev->speed == USB_SPEED_HIGH) 611 ? (INTERRUPT_RATE << 3) 612 : INTERRUPT_RATE, 613 sg, nents, 0, GFP_KERNEL); 614 615 if (retval) 616 break; 617 mod_timer(&timeout.timer, jiffies + 618 msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); 619 usb_sg_wait(req); 620 if (!del_timer_sync(&timeout.timer)) 621 retval = -ETIMEDOUT; 622 else 623 retval = req->status; 624 destroy_timer_on_stack(&timeout.timer); 625 626 /* FIXME check resulting data pattern */ 627 628 /* FIXME if endpoint halted, clear halt (and log) */ 629 } 630 631 /* FIXME for unlink or fault handling tests, don't report 632 * failure if retval is as we expected ... 633 */ 634 if (retval) 635 ERROR(tdev, "perform_sglist failed, " 636 "iterations left %d, status %d\n", 637 iterations, retval); 638 return retval; 639 } 640 641 642 /*-------------------------------------------------------------------------*/ 643 644 /* unqueued control message testing 645 * 646 * there's a nice set of device functional requirements in chapter 9 of the 647 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use 648 * special test firmware. 649 * 650 * we know the device is configured (or suspended) by the time it's visible 651 * through usbfs. we can't change that, so we won't test enumeration (which 652 * worked 'well enough' to get here, this time), power management (ditto), 653 * or remote wakeup (which needs human interaction). 
654 */ 655 656 static unsigned realworld = 1; 657 module_param(realworld, uint, 0); 658 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance"); 659 660 static int get_altsetting(struct usbtest_dev *dev) 661 { 662 struct usb_interface *iface = dev->intf; 663 struct usb_device *udev = interface_to_usbdev(iface); 664 int retval; 665 666 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 667 USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE, 668 0, iface->altsetting[0].desc.bInterfaceNumber, 669 dev->buf, 1, USB_CTRL_GET_TIMEOUT); 670 switch (retval) { 671 case 1: 672 return dev->buf[0]; 673 case 0: 674 retval = -ERANGE; 675 /* FALLTHROUGH */ 676 default: 677 return retval; 678 } 679 } 680 681 static int set_altsetting(struct usbtest_dev *dev, int alternate) 682 { 683 struct usb_interface *iface = dev->intf; 684 struct usb_device *udev; 685 686 if (alternate < 0 || alternate >= 256) 687 return -EINVAL; 688 689 udev = interface_to_usbdev(iface); 690 return usb_set_interface(udev, 691 iface->altsetting[0].desc.bInterfaceNumber, 692 alternate); 693 } 694 695 static int is_good_config(struct usbtest_dev *tdev, int len) 696 { 697 struct usb_config_descriptor *config; 698 699 if (len < sizeof(*config)) 700 return 0; 701 config = (struct usb_config_descriptor *) tdev->buf; 702 703 switch (config->bDescriptorType) { 704 case USB_DT_CONFIG: 705 case USB_DT_OTHER_SPEED_CONFIG: 706 if (config->bLength != 9) { 707 ERROR(tdev, "bogus config descriptor length\n"); 708 return 0; 709 } 710 /* this bit 'must be 1' but often isn't */ 711 if (!realworld && !(config->bmAttributes & 0x80)) { 712 ERROR(tdev, "high bit of config attributes not set\n"); 713 return 0; 714 } 715 if (config->bmAttributes & 0x1f) { /* reserved == 0 */ 716 ERROR(tdev, "reserved config bits set\n"); 717 return 0; 718 } 719 break; 720 default: 721 return 0; 722 } 723 724 if (le16_to_cpu(config->wTotalLength) == len) /* read it all */ 725 return 1; 726 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */ 727 return 1; 728 ERROR(tdev, "bogus config descriptor read size\n"); 729 return 0; 730 } 731 732 static int is_good_ext(struct usbtest_dev *tdev, u8 *buf) 733 { 734 struct usb_ext_cap_descriptor *ext; 735 u32 attr; 736 737 ext = (struct usb_ext_cap_descriptor *) buf; 738 739 if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) { 740 ERROR(tdev, "bogus usb 2.0 extension descriptor length\n"); 741 return 0; 742 } 743 744 attr = le32_to_cpu(ext->bmAttributes); 745 /* bits[1:15] is used and others are reserved */ 746 if (attr & ~0xfffe) { /* reserved == 0 */ 747 ERROR(tdev, "reserved bits set\n"); 748 return 0; 749 } 750 751 return 1; 752 } 753 754 static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf) 755 { 756 struct usb_ss_cap_descriptor *ss; 757 758 ss = (struct usb_ss_cap_descriptor *) buf; 759 760 if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) { 761 ERROR(tdev, "bogus superspeed device capability descriptor length\n"); 762 return 0; 763 } 764 765 /* 766 * only bit[1] of bmAttributes is used for LTM and others are 767 * reserved 768 */ 769 if (ss->bmAttributes & ~0x02) { /* reserved == 0 */ 770 ERROR(tdev, "reserved bits set in bmAttributes\n"); 771 return 0; 772 } 773 774 /* bits[0:3] of wSpeedSupported is used and others are reserved */ 775 if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */ 776 ERROR(tdev, "reserved bits set in wSpeedSupported\n"); 777 return 0; 778 } 779 780 return 1; 781 } 782 783 static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf) 784 { 785 struct 
usb_ss_container_id_descriptor *con_id; 786 787 con_id = (struct usb_ss_container_id_descriptor *) buf; 788 789 if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) { 790 ERROR(tdev, "bogus container id descriptor length\n"); 791 return 0; 792 } 793 794 if (con_id->bReserved) { /* reserved == 0 */ 795 ERROR(tdev, "reserved bits set\n"); 796 return 0; 797 } 798 799 return 1; 800 } 801 802 /* sanity test for standard requests working with usb_control_mesg() and some 803 * of the utility functions which use it. 804 * 805 * this doesn't test how endpoint halts behave or data toggles get set, since 806 * we won't do I/O to bulk/interrupt endpoints here (which is how to change 807 * halt or toggle). toggle testing is impractical without support from hcds. 808 * 809 * this avoids failing devices linux would normally work with, by not testing 810 * config/altsetting operations for devices that only support their defaults. 811 * such devices rarely support those needless operations. 812 * 813 * NOTE that since this is a sanity test, it's not examining boundary cases 814 * to see if usbcore, hcd, and device all behave right. such testing would 815 * involve varied read sizes and other operation sequences. 816 */ 817 static int ch9_postconfig(struct usbtest_dev *dev) 818 { 819 struct usb_interface *iface = dev->intf; 820 struct usb_device *udev = interface_to_usbdev(iface); 821 int i, alt, retval; 822 823 /* [9.2.3] if there's more than one altsetting, we need to be able to 824 * set and get each one. mostly trusts the descriptors from usbcore. 825 */ 826 for (i = 0; i < iface->num_altsetting; i++) { 827 828 /* 9.2.3 constrains the range here */ 829 alt = iface->altsetting[i].desc.bAlternateSetting; 830 if (alt < 0 || alt >= iface->num_altsetting) { 831 dev_err(&iface->dev, 832 "invalid alt [%d].bAltSetting = %d\n", 833 i, alt); 834 } 835 836 /* [real world] get/set unimplemented if there's only one */ 837 if (realworld && iface->num_altsetting == 1) 838 continue; 839 840 /* [9.4.10] set_interface */ 841 retval = set_altsetting(dev, alt); 842 if (retval) { 843 dev_err(&iface->dev, "can't set_interface = %d, %d\n", 844 alt, retval); 845 return retval; 846 } 847 848 /* [9.4.4] get_interface always works */ 849 retval = get_altsetting(dev); 850 if (retval != alt) { 851 dev_err(&iface->dev, "get alt should be %d, was %d\n", 852 alt, retval); 853 return (retval < 0) ? retval : -EDOM; 854 } 855 856 } 857 858 /* [real world] get_config unimplemented if there's only one */ 859 if (!realworld || udev->descriptor.bNumConfigurations != 1) { 860 int expected = udev->actconfig->desc.bConfigurationValue; 861 862 /* [9.4.2] get_configuration always works 863 * ... although some cheap devices (like one TI Hub I've got) 864 * won't return config descriptors except before set_config. 865 */ 866 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 867 USB_REQ_GET_CONFIGURATION, 868 USB_DIR_IN | USB_RECIP_DEVICE, 869 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT); 870 if (retval != 1 || dev->buf[0] != expected) { 871 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n", 872 retval, dev->buf[0], expected); 873 return (retval < 0) ? retval : -EDOM; 874 } 875 } 876 877 /* there's always [9.4.3] a device descriptor [9.6.1] */ 878 retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0, 879 dev->buf, sizeof(udev->descriptor)); 880 if (retval != sizeof(udev->descriptor)) { 881 dev_err(&iface->dev, "dev descriptor --> %d\n", retval); 882 return (retval < 0) ? 
retval : -EDOM; 883 } 884 885 /* 886 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB 887 * 3.0 spec 888 */ 889 if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) { 890 struct usb_bos_descriptor *bos = NULL; 891 struct usb_dev_cap_header *header = NULL; 892 unsigned total, num, length; 893 u8 *buf; 894 895 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf, 896 sizeof(*udev->bos->desc)); 897 if (retval != sizeof(*udev->bos->desc)) { 898 dev_err(&iface->dev, "bos descriptor --> %d\n", retval); 899 return (retval < 0) ? retval : -EDOM; 900 } 901 902 bos = (struct usb_bos_descriptor *)dev->buf; 903 total = le16_to_cpu(bos->wTotalLength); 904 num = bos->bNumDeviceCaps; 905 906 if (total > TBUF_SIZE) 907 total = TBUF_SIZE; 908 909 /* 910 * get generic device-level capability descriptors [9.6.2] 911 * in USB 3.0 spec 912 */ 913 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf, 914 total); 915 if (retval != total) { 916 dev_err(&iface->dev, "bos descriptor set --> %d\n", 917 retval); 918 return (retval < 0) ? retval : -EDOM; 919 } 920 921 length = sizeof(*udev->bos->desc); 922 buf = dev->buf; 923 for (i = 0; i < num; i++) { 924 buf += length; 925 if (buf + sizeof(struct usb_dev_cap_header) > 926 dev->buf + total) 927 break; 928 929 header = (struct usb_dev_cap_header *)buf; 930 length = header->bLength; 931 932 if (header->bDescriptorType != 933 USB_DT_DEVICE_CAPABILITY) { 934 dev_warn(&udev->dev, "not device capability descriptor, skip\n"); 935 continue; 936 } 937 938 switch (header->bDevCapabilityType) { 939 case USB_CAP_TYPE_EXT: 940 if (buf + USB_DT_USB_EXT_CAP_SIZE > 941 dev->buf + total || 942 !is_good_ext(dev, buf)) { 943 dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n"); 944 return -EDOM; 945 } 946 break; 947 case USB_SS_CAP_TYPE: 948 if (buf + USB_DT_USB_SS_CAP_SIZE > 949 dev->buf + total || 950 !is_good_ss_cap(dev, buf)) { 951 dev_err(&iface->dev, "bogus superspeed device capability descriptor\n"); 952 return -EDOM; 953 } 954 break; 955 case CONTAINER_ID_TYPE: 956 if (buf + USB_DT_USB_SS_CONTN_ID_SIZE > 957 dev->buf + total || 958 !is_good_con_id(dev, buf)) { 959 dev_err(&iface->dev, "bogus container id descriptor\n"); 960 return -EDOM; 961 } 962 break; 963 default: 964 break; 965 } 966 } 967 } 968 969 /* there's always [9.4.3] at least one config descriptor [9.6.3] */ 970 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) { 971 retval = usb_get_descriptor(udev, USB_DT_CONFIG, i, 972 dev->buf, TBUF_SIZE); 973 if (!is_good_config(dev, retval)) { 974 dev_err(&iface->dev, 975 "config [%d] descriptor --> %d\n", 976 i, retval); 977 return (retval < 0) ? retval : -EDOM; 978 } 979 980 /* FIXME cross-checking udev->config[i] to make sure usbcore 981 * parsed it right (etc) would be good testing paranoia 982 */ 983 } 984 985 /* and sometimes [9.2.6.6] speed dependent descriptors */ 986 if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) { 987 struct usb_qualifier_descriptor *d = NULL; 988 989 /* device qualifier [9.6.2] */ 990 retval = usb_get_descriptor(udev, 991 USB_DT_DEVICE_QUALIFIER, 0, dev->buf, 992 sizeof(struct usb_qualifier_descriptor)); 993 if (retval == -EPIPE) { 994 if (udev->speed == USB_SPEED_HIGH) { 995 dev_err(&iface->dev, 996 "hs dev qualifier --> %d\n", 997 retval); 998 return retval; 999 } 1000 /* usb2.0 but not high-speed capable; fine */ 1001 } else if (retval != sizeof(struct usb_qualifier_descriptor)) { 1002 dev_err(&iface->dev, "dev qualifier --> %d\n", retval); 1003 return (retval < 0) ? 
retval : -EDOM; 1004 } else 1005 d = (struct usb_qualifier_descriptor *) dev->buf; 1006 1007 /* might not have [9.6.2] any other-speed configs [9.6.4] */ 1008 if (d) { 1009 unsigned max = d->bNumConfigurations; 1010 for (i = 0; i < max; i++) { 1011 retval = usb_get_descriptor(udev, 1012 USB_DT_OTHER_SPEED_CONFIG, i, 1013 dev->buf, TBUF_SIZE); 1014 if (!is_good_config(dev, retval)) { 1015 dev_err(&iface->dev, 1016 "other speed config --> %d\n", 1017 retval); 1018 return (retval < 0) ? retval : -EDOM; 1019 } 1020 } 1021 } 1022 } 1023 /* FIXME fetch strings from at least the device descriptor */ 1024 1025 /* [9.4.5] get_status always works */ 1026 retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf); 1027 if (retval) { 1028 dev_err(&iface->dev, "get dev status --> %d\n", retval); 1029 return retval; 1030 } 1031 1032 /* FIXME configuration.bmAttributes says if we could try to set/clear 1033 * the device's remote wakeup feature ... if we can, test that here 1034 */ 1035 1036 retval = usb_get_status(udev, USB_RECIP_INTERFACE, 1037 iface->altsetting[0].desc.bInterfaceNumber, dev->buf); 1038 if (retval) { 1039 dev_err(&iface->dev, "get interface status --> %d\n", retval); 1040 return retval; 1041 } 1042 /* FIXME get status for each endpoint in the interface */ 1043 1044 return 0; 1045 } 1046 1047 /*-------------------------------------------------------------------------*/ 1048 1049 /* use ch9 requests to test whether: 1050 * (a) queues work for control, keeping N subtests queued and 1051 * active (auto-resubmit) for M loops through the queue. 1052 * (b) protocol stalls (control-only) will autorecover. 1053 * it's not like bulk/intr; no halt clearing. 1054 * (c) short control reads are reported and handled. 1055 * (d) queues are always processed in-order 1056 */ 1057 1058 struct ctrl_ctx { 1059 spinlock_t lock; 1060 struct usbtest_dev *dev; 1061 struct completion complete; 1062 unsigned count; 1063 unsigned pending; 1064 int status; 1065 struct urb **urb; 1066 struct usbtest_param_32 *param; 1067 int last; 1068 }; 1069 1070 #define NUM_SUBCASES 16 /* how many test subcases here? */ 1071 1072 struct subcase { 1073 struct usb_ctrlrequest setup; 1074 int number; 1075 int expected; 1076 }; 1077 1078 static void ctrl_complete(struct urb *urb) 1079 { 1080 struct ctrl_ctx *ctx = urb->context; 1081 struct usb_ctrlrequest *reqp; 1082 struct subcase *subcase; 1083 int status = urb->status; 1084 1085 reqp = (struct usb_ctrlrequest *)urb->setup_packet; 1086 subcase = container_of(reqp, struct subcase, setup); 1087 1088 spin_lock(&ctx->lock); 1089 ctx->count--; 1090 ctx->pending--; 1091 1092 /* queue must transfer and complete in fifo order, unless 1093 * usb_unlink_urb() is used to unlink something not at the 1094 * physical queue head (not tested). 1095 */ 1096 if (subcase->number > 0) { 1097 if ((subcase->number - ctx->last) != 1) { 1098 ERROR(ctx->dev, 1099 "subcase %d completed out of order, last %d\n", 1100 subcase->number, ctx->last); 1101 status = -EDOM; 1102 ctx->last = subcase->number; 1103 goto error; 1104 } 1105 } 1106 ctx->last = subcase->number; 1107 1108 /* succeed or fault in only one way? */ 1109 if (status == subcase->expected) 1110 status = 0; 1111 1112 /* async unlink for cleanup? 
*/ 1113 else if (status != -ECONNRESET) { 1114 1115 /* some faults are allowed, not required */ 1116 if (subcase->expected > 0 && ( 1117 ((status == -subcase->expected /* happened */ 1118 || status == 0)))) /* didn't */ 1119 status = 0; 1120 /* sometimes more than one fault is allowed */ 1121 else if (subcase->number == 12 && status == -EPIPE) 1122 status = 0; 1123 else 1124 ERROR(ctx->dev, "subtest %d error, status %d\n", 1125 subcase->number, status); 1126 } 1127 1128 /* unexpected status codes mean errors; ideally, in hardware */ 1129 if (status) { 1130 error: 1131 if (ctx->status == 0) { 1132 int i; 1133 1134 ctx->status = status; 1135 ERROR(ctx->dev, "control queue %02x.%02x, err %d, " 1136 "%d left, subcase %d, len %d/%d\n", 1137 reqp->bRequestType, reqp->bRequest, 1138 status, ctx->count, subcase->number, 1139 urb->actual_length, 1140 urb->transfer_buffer_length); 1141 1142 /* FIXME this "unlink everything" exit route should 1143 * be a separate test case. 1144 */ 1145 1146 /* unlink whatever's still pending */ 1147 for (i = 1; i < ctx->param->sglen; i++) { 1148 struct urb *u = ctx->urb[ 1149 (i + subcase->number) 1150 % ctx->param->sglen]; 1151 1152 if (u == urb || !u->dev) 1153 continue; 1154 spin_unlock(&ctx->lock); 1155 status = usb_unlink_urb(u); 1156 spin_lock(&ctx->lock); 1157 switch (status) { 1158 case -EINPROGRESS: 1159 case -EBUSY: 1160 case -EIDRM: 1161 continue; 1162 default: 1163 ERROR(ctx->dev, "urb unlink --> %d\n", 1164 status); 1165 } 1166 } 1167 status = ctx->status; 1168 } 1169 } 1170 1171 /* resubmit if we need to, else mark this as done */ 1172 if ((status == 0) && (ctx->pending < ctx->count)) { 1173 status = usb_submit_urb(urb, GFP_ATOMIC); 1174 if (status != 0) { 1175 ERROR(ctx->dev, 1176 "can't resubmit ctrl %02x.%02x, err %d\n", 1177 reqp->bRequestType, reqp->bRequest, status); 1178 urb->dev = NULL; 1179 } else 1180 ctx->pending++; 1181 } else 1182 urb->dev = NULL; 1183 1184 /* signal completion when nothing's queued */ 1185 if (ctx->pending == 0) 1186 complete(&ctx->complete); 1187 spin_unlock(&ctx->lock); 1188 } 1189 1190 static int 1191 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param) 1192 { 1193 struct usb_device *udev = testdev_to_usbdev(dev); 1194 struct urb **urb; 1195 struct ctrl_ctx context; 1196 int i; 1197 1198 if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen) 1199 return -EOPNOTSUPP; 1200 1201 spin_lock_init(&context.lock); 1202 context.dev = dev; 1203 init_completion(&context.complete); 1204 context.count = param->sglen * param->iterations; 1205 context.pending = 0; 1206 context.status = -ENOMEM; 1207 context.param = param; 1208 context.last = -1; 1209 1210 /* allocate and init the urbs we'll queue. 1211 * as with bulk/intr sglists, sglen is the queue depth; it also 1212 * controls which subtests run (more tests than sglen) or rerun. 1213 */ 1214 urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL); 1215 if (!urb) 1216 return -ENOMEM; 1217 for (i = 0; i < param->sglen; i++) { 1218 int pipe = usb_rcvctrlpipe(udev, 0); 1219 unsigned len; 1220 struct urb *u; 1221 struct usb_ctrlrequest req; 1222 struct subcase *reqp; 1223 1224 /* sign of this variable means: 1225 * -: tested code must return this (negative) error code 1226 * +: tested code may return this (negative too) error code 1227 */ 1228 int expected = 0; 1229 1230 /* requests here are mostly expected to succeed on any 1231 * device, but some are chosen to trigger protocol stalls 1232 * or short reads. 
1233 */ 1234 memset(&req, 0, sizeof(req)); 1235 req.bRequest = USB_REQ_GET_DESCRIPTOR; 1236 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE; 1237 1238 switch (i % NUM_SUBCASES) { 1239 case 0: /* get device descriptor */ 1240 req.wValue = cpu_to_le16(USB_DT_DEVICE << 8); 1241 len = sizeof(struct usb_device_descriptor); 1242 break; 1243 case 1: /* get first config descriptor (only) */ 1244 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1245 len = sizeof(struct usb_config_descriptor); 1246 break; 1247 case 2: /* get altsetting (OFTEN STALLS) */ 1248 req.bRequest = USB_REQ_GET_INTERFACE; 1249 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; 1250 /* index = 0 means first interface */ 1251 len = 1; 1252 expected = EPIPE; 1253 break; 1254 case 3: /* get interface status */ 1255 req.bRequest = USB_REQ_GET_STATUS; 1256 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; 1257 /* interface 0 */ 1258 len = 2; 1259 break; 1260 case 4: /* get device status */ 1261 req.bRequest = USB_REQ_GET_STATUS; 1262 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE; 1263 len = 2; 1264 break; 1265 case 5: /* get device qualifier (MAY STALL) */ 1266 req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8); 1267 len = sizeof(struct usb_qualifier_descriptor); 1268 if (udev->speed != USB_SPEED_HIGH) 1269 expected = EPIPE; 1270 break; 1271 case 6: /* get first config descriptor, plus interface */ 1272 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1273 len = sizeof(struct usb_config_descriptor); 1274 len += sizeof(struct usb_interface_descriptor); 1275 break; 1276 case 7: /* get interface descriptor (ALWAYS STALLS) */ 1277 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8); 1278 /* interface == 0 */ 1279 len = sizeof(struct usb_interface_descriptor); 1280 expected = -EPIPE; 1281 break; 1282 /* NOTE: two consecutive stalls in the queue here. 1283 * that tests fault recovery a bit more aggressively. */ 1284 case 8: /* clear endpoint halt (MAY STALL) */ 1285 req.bRequest = USB_REQ_CLEAR_FEATURE; 1286 req.bRequestType = USB_RECIP_ENDPOINT; 1287 /* wValue 0 == ep halt */ 1288 /* wIndex 0 == ep0 (shouldn't halt!) */ 1289 len = 0; 1290 pipe = usb_sndctrlpipe(udev, 0); 1291 expected = EPIPE; 1292 break; 1293 case 9: /* get endpoint status */ 1294 req.bRequest = USB_REQ_GET_STATUS; 1295 req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT; 1296 /* endpoint 0 */ 1297 len = 2; 1298 break; 1299 case 10: /* trigger short read (EREMOTEIO) */ 1300 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1301 len = 1024; 1302 expected = -EREMOTEIO; 1303 break; 1304 /* NOTE: two consecutive _different_ faults in the queue. */ 1305 case 11: /* get endpoint descriptor (ALWAYS STALLS) */ 1306 req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8); 1307 /* endpoint == 0 */ 1308 len = sizeof(struct usb_interface_descriptor); 1309 expected = EPIPE; 1310 break; 1311 /* NOTE: sometimes even a third fault in the queue! 
*/ 1312 case 12: /* get string 0 descriptor (MAY STALL) */ 1313 req.wValue = cpu_to_le16(USB_DT_STRING << 8); 1314 /* string == 0, for language IDs */ 1315 len = sizeof(struct usb_interface_descriptor); 1316 /* may succeed when > 4 languages */ 1317 expected = EREMOTEIO; /* or EPIPE, if no strings */ 1318 break; 1319 case 13: /* short read, resembling case 10 */ 1320 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1321 /* last data packet "should" be DATA1, not DATA0 */ 1322 if (udev->speed == USB_SPEED_SUPER) 1323 len = 1024 - 512; 1324 else 1325 len = 1024 - udev->descriptor.bMaxPacketSize0; 1326 expected = -EREMOTEIO; 1327 break; 1328 case 14: /* short read; try to fill the last packet */ 1329 req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0); 1330 /* device descriptor size == 18 bytes */ 1331 len = udev->descriptor.bMaxPacketSize0; 1332 if (udev->speed == USB_SPEED_SUPER) 1333 len = 512; 1334 switch (len) { 1335 case 8: 1336 len = 24; 1337 break; 1338 case 16: 1339 len = 32; 1340 break; 1341 } 1342 expected = -EREMOTEIO; 1343 break; 1344 case 15: 1345 req.wValue = cpu_to_le16(USB_DT_BOS << 8); 1346 if (udev->bos) 1347 len = le16_to_cpu(udev->bos->desc->wTotalLength); 1348 else 1349 len = sizeof(struct usb_bos_descriptor); 1350 if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201) 1351 expected = -EPIPE; 1352 break; 1353 default: 1354 ERROR(dev, "bogus number of ctrl queue testcases!\n"); 1355 context.status = -EINVAL; 1356 goto cleanup; 1357 } 1358 req.wLength = cpu_to_le16(len); 1359 urb[i] = u = simple_alloc_urb(udev, pipe, len, 0); 1360 if (!u) 1361 goto cleanup; 1362 1363 reqp = kmalloc(sizeof(*reqp), GFP_KERNEL); 1364 if (!reqp) 1365 goto cleanup; 1366 reqp->setup = req; 1367 reqp->number = i % NUM_SUBCASES; 1368 reqp->expected = expected; 1369 u->setup_packet = (char *) &reqp->setup; 1370 1371 u->context = &context; 1372 u->complete = ctrl_complete; 1373 } 1374 1375 /* queue the urbs */ 1376 context.urb = urb; 1377 spin_lock_irq(&context.lock); 1378 for (i = 0; i < param->sglen; i++) { 1379 context.status = usb_submit_urb(urb[i], GFP_ATOMIC); 1380 if (context.status != 0) { 1381 ERROR(dev, "can't submit urb[%d], status %d\n", 1382 i, context.status); 1383 context.count = context.pending; 1384 break; 1385 } 1386 context.pending++; 1387 } 1388 spin_unlock_irq(&context.lock); 1389 1390 /* FIXME set timer and time out; provide a disconnect hook */ 1391 1392 /* wait for the last one to complete */ 1393 if (context.pending > 0) 1394 wait_for_completion(&context.complete); 1395 1396 cleanup: 1397 for (i = 0; i < param->sglen; i++) { 1398 if (!urb[i]) 1399 continue; 1400 urb[i]->dev = udev; 1401 kfree(urb[i]->setup_packet); 1402 simple_free_urb(urb[i]); 1403 } 1404 kfree(urb); 1405 return context.status; 1406 } 1407 #undef NUM_SUBCASES 1408 1409 1410 /*-------------------------------------------------------------------------*/ 1411 1412 static void unlink1_callback(struct urb *urb) 1413 { 1414 int status = urb->status; 1415 1416 /* we "know" -EPIPE (stall) never happens */ 1417 if (!status) 1418 status = usb_submit_urb(urb, GFP_ATOMIC); 1419 if (status) { 1420 urb->status = status; 1421 complete(urb->context); 1422 } 1423 } 1424 1425 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async) 1426 { 1427 struct urb *urb; 1428 struct completion completion; 1429 int retval = 0; 1430 1431 init_completion(&completion); 1432 urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0); 1433 if (!urb) 1434 return -ENOMEM; 1435 urb->context = &completion; 1436 urb->complete = 
unlink1_callback; 1437 1438 if (usb_pipeout(urb->pipe)) { 1439 simple_fill_buf(urb); 1440 urb->transfer_flags |= URB_ZERO_PACKET; 1441 } 1442 1443 /* keep the endpoint busy. there are lots of hc/hcd-internal 1444 * states, and testing should get to all of them over time. 1445 * 1446 * FIXME want additional tests for when endpoint is STALLing 1447 * due to errors, or is just NAKing requests. 1448 */ 1449 retval = usb_submit_urb(urb, GFP_KERNEL); 1450 if (retval != 0) { 1451 dev_err(&dev->intf->dev, "submit fail %d\n", retval); 1452 return retval; 1453 } 1454 1455 /* unlinking that should always work. variable delay tests more 1456 * hcd states and code paths, even with little other system load. 1457 */ 1458 msleep(jiffies % (2 * INTERRUPT_RATE)); 1459 if (async) { 1460 while (!completion_done(&completion)) { 1461 retval = usb_unlink_urb(urb); 1462 1463 if (retval == 0 && usb_pipein(urb->pipe)) 1464 retval = simple_check_buf(dev, urb); 1465 1466 switch (retval) { 1467 case -EBUSY: 1468 case -EIDRM: 1469 /* we can't unlink urbs while they're completing 1470 * or if they've completed, and we haven't 1471 * resubmitted. "normal" drivers would prevent 1472 * resubmission, but since we're testing unlink 1473 * paths, we can't. 1474 */ 1475 ERROR(dev, "unlink retry\n"); 1476 continue; 1477 case 0: 1478 case -EINPROGRESS: 1479 break; 1480 1481 default: 1482 dev_err(&dev->intf->dev, 1483 "unlink fail %d\n", retval); 1484 return retval; 1485 } 1486 1487 break; 1488 } 1489 } else 1490 usb_kill_urb(urb); 1491 1492 wait_for_completion(&completion); 1493 retval = urb->status; 1494 simple_free_urb(urb); 1495 1496 if (async) 1497 return (retval == -ECONNRESET) ? 0 : retval - 1000; 1498 else 1499 return (retval == -ENOENT || retval == -EPERM) ? 1500 0 : retval - 2000; 1501 } 1502 1503 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len) 1504 { 1505 int retval = 0; 1506 1507 /* test sync and async paths */ 1508 retval = unlink1(dev, pipe, len, 1); 1509 if (!retval) 1510 retval = unlink1(dev, pipe, len, 0); 1511 return retval; 1512 } 1513 1514 /*-------------------------------------------------------------------------*/ 1515 1516 struct queued_ctx { 1517 struct completion complete; 1518 atomic_t pending; 1519 unsigned num; 1520 int status; 1521 struct urb **urbs; 1522 }; 1523 1524 static void unlink_queued_callback(struct urb *urb) 1525 { 1526 int status = urb->status; 1527 struct queued_ctx *ctx = urb->context; 1528 1529 if (ctx->status) 1530 goto done; 1531 if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) { 1532 if (status == -ECONNRESET) 1533 goto done; 1534 /* What error should we report if the URB completed normally? 
*/ 1535 } 1536 if (status != 0) 1537 ctx->status = status; 1538 1539 done: 1540 if (atomic_dec_and_test(&ctx->pending)) 1541 complete(&ctx->complete); 1542 } 1543 1544 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num, 1545 unsigned size) 1546 { 1547 struct queued_ctx ctx; 1548 struct usb_device *udev = testdev_to_usbdev(dev); 1549 void *buf; 1550 dma_addr_t buf_dma; 1551 int i; 1552 int retval = -ENOMEM; 1553 1554 init_completion(&ctx.complete); 1555 atomic_set(&ctx.pending, 1); /* One more than the actual value */ 1556 ctx.num = num; 1557 ctx.status = 0; 1558 1559 buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma); 1560 if (!buf) 1561 return retval; 1562 memset(buf, 0, size); 1563 1564 /* Allocate and init the urbs we'll queue */ 1565 ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL); 1566 if (!ctx.urbs) 1567 goto free_buf; 1568 for (i = 0; i < num; i++) { 1569 ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL); 1570 if (!ctx.urbs[i]) 1571 goto free_urbs; 1572 usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size, 1573 unlink_queued_callback, &ctx); 1574 ctx.urbs[i]->transfer_dma = buf_dma; 1575 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; 1576 1577 if (usb_pipeout(ctx.urbs[i]->pipe)) { 1578 simple_fill_buf(ctx.urbs[i]); 1579 ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET; 1580 } 1581 } 1582 1583 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */ 1584 for (i = 0; i < num; i++) { 1585 atomic_inc(&ctx.pending); 1586 retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL); 1587 if (retval != 0) { 1588 dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n", 1589 i, retval); 1590 atomic_dec(&ctx.pending); 1591 ctx.status = retval; 1592 break; 1593 } 1594 } 1595 if (i == num) { 1596 usb_unlink_urb(ctx.urbs[num - 4]); 1597 usb_unlink_urb(ctx.urbs[num - 2]); 1598 } else { 1599 while (--i >= 0) 1600 usb_unlink_urb(ctx.urbs[i]); 1601 } 1602 1603 if (atomic_dec_and_test(&ctx.pending)) /* The extra count */ 1604 complete(&ctx.complete); 1605 wait_for_completion(&ctx.complete); 1606 retval = ctx.status; 1607 1608 free_urbs: 1609 for (i = 0; i < num; i++) 1610 usb_free_urb(ctx.urbs[i]); 1611 kfree(ctx.urbs); 1612 free_buf: 1613 usb_free_coherent(udev, size, buf, buf_dma); 1614 return retval; 1615 } 1616 1617 /*-------------------------------------------------------------------------*/ 1618 1619 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb) 1620 { 1621 int retval; 1622 u16 status; 1623 1624 /* shouldn't look or act halted */ 1625 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1626 if (retval < 0) { 1627 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n", 1628 ep, retval); 1629 return retval; 1630 } 1631 if (status != 0) { 1632 ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status); 1633 return -EINVAL; 1634 } 1635 retval = simple_io(tdev, urb, 1, 0, 0, __func__); 1636 if (retval != 0) 1637 return -EINVAL; 1638 return 0; 1639 } 1640 1641 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb) 1642 { 1643 int retval; 1644 u16 status; 1645 1646 /* should look and act halted */ 1647 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1648 if (retval < 0) { 1649 ERROR(tdev, "ep %02x couldn't get halt status, %d\n", 1650 ep, retval); 1651 return retval; 1652 } 1653 if (status != 1) { 1654 ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status); 1655 return -EINVAL; 1656 } 1657 retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__); 1658 if (retval != 
-EPIPE) 1659 return -EINVAL; 1660 retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted"); 1661 if (retval != -EPIPE) 1662 return -EINVAL; 1663 return 0; 1664 } 1665 1666 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb) 1667 { 1668 int retval; 1669 1670 /* shouldn't look or act halted now */ 1671 retval = verify_not_halted(tdev, ep, urb); 1672 if (retval < 0) 1673 return retval; 1674 1675 /* set halt (protocol test only), verify it worked */ 1676 retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0), 1677 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT, 1678 USB_ENDPOINT_HALT, ep, 1679 NULL, 0, USB_CTRL_SET_TIMEOUT); 1680 if (retval < 0) { 1681 ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval); 1682 return retval; 1683 } 1684 retval = verify_halted(tdev, ep, urb); 1685 if (retval < 0) { 1686 int ret; 1687 1688 /* clear halt anyways, else further tests will fail */ 1689 ret = usb_clear_halt(urb->dev, urb->pipe); 1690 if (ret) 1691 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", 1692 ep, ret); 1693 1694 return retval; 1695 } 1696 1697 /* clear halt (tests API + protocol), verify it worked */ 1698 retval = usb_clear_halt(urb->dev, urb->pipe); 1699 if (retval < 0) { 1700 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); 1701 return retval; 1702 } 1703 retval = verify_not_halted(tdev, ep, urb); 1704 if (retval < 0) 1705 return retval; 1706 1707 /* NOTE: could also verify SET_INTERFACE clear halts ... */ 1708 1709 return 0; 1710 } 1711 1712 static int halt_simple(struct usbtest_dev *dev) 1713 { 1714 int ep; 1715 int retval = 0; 1716 struct urb *urb; 1717 struct usb_device *udev = testdev_to_usbdev(dev); 1718 1719 if (udev->speed == USB_SPEED_SUPER) 1720 urb = simple_alloc_urb(udev, 0, 1024, 0); 1721 else 1722 urb = simple_alloc_urb(udev, 0, 512, 0); 1723 if (urb == NULL) 1724 return -ENOMEM; 1725 1726 if (dev->in_pipe) { 1727 ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN; 1728 urb->pipe = dev->in_pipe; 1729 retval = test_halt(dev, ep, urb); 1730 if (retval < 0) 1731 goto done; 1732 } 1733 1734 if (dev->out_pipe) { 1735 ep = usb_pipeendpoint(dev->out_pipe); 1736 urb->pipe = dev->out_pipe; 1737 retval = test_halt(dev, ep, urb); 1738 } 1739 done: 1740 simple_free_urb(urb); 1741 return retval; 1742 } 1743 1744 /*-------------------------------------------------------------------------*/ 1745 1746 /* Control OUT tests use the vendor control requests from Intel's 1747 * USB 2.0 compliance test device: write a buffer, read it back. 1748 * 1749 * Intel's spec only _requires_ that it work for one packet, which 1750 * is pretty weak. Some HCDs place limits here; most devices will 1751 * need to be able to handle more than one OUT data packet. We'll 1752 * try whatever we're told to try. 1753 */ 1754 static int ctrl_out(struct usbtest_dev *dev, 1755 unsigned count, unsigned length, unsigned vary, unsigned offset) 1756 { 1757 unsigned i, j, len; 1758 int retval; 1759 u8 *buf; 1760 char *what = "?"; 1761 struct usb_device *udev; 1762 1763 if (length < 1 || length > 0xffff || vary >= length) 1764 return -EINVAL; 1765 1766 buf = kmalloc(length + offset, GFP_KERNEL); 1767 if (!buf) 1768 return -ENOMEM; 1769 1770 buf += offset; 1771 udev = testdev_to_usbdev(dev); 1772 len = length; 1773 retval = 0; 1774 1775 /* NOTE: hardware might well act differently if we pushed it 1776 * with lots back-to-back queued requests. 
1777 */ 1778 for (i = 0; i < count; i++) { 1779 /* write patterned data */ 1780 for (j = 0; j < len; j++) 1781 buf[j] = (u8)(i + j); 1782 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 1783 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR, 1784 0, 0, buf, len, USB_CTRL_SET_TIMEOUT); 1785 if (retval != len) { 1786 what = "write"; 1787 if (retval >= 0) { 1788 ERROR(dev, "ctrl_out, wlen %d (expected %d)\n", 1789 retval, len); 1790 retval = -EBADMSG; 1791 } 1792 break; 1793 } 1794 1795 /* read it back -- assuming nothing intervened!! */ 1796 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 1797 0x5c, USB_DIR_IN|USB_TYPE_VENDOR, 1798 0, 0, buf, len, USB_CTRL_GET_TIMEOUT); 1799 if (retval != len) { 1800 what = "read"; 1801 if (retval >= 0) { 1802 ERROR(dev, "ctrl_out, rlen %d (expected %d)\n", 1803 retval, len); 1804 retval = -EBADMSG; 1805 } 1806 break; 1807 } 1808 1809 /* fail if we can't verify */ 1810 for (j = 0; j < len; j++) { 1811 if (buf[j] != (u8)(i + j)) { 1812 ERROR(dev, "ctrl_out, byte %d is %d not %d\n", 1813 j, buf[j], (u8)(i + j)); 1814 retval = -EBADMSG; 1815 break; 1816 } 1817 } 1818 if (retval < 0) { 1819 what = "verify"; 1820 break; 1821 } 1822 1823 len += vary; 1824 1825 /* [real world] the "zero bytes IN" case isn't really used. 1826 * hardware can easily trip up in this weird case, since its 1827 * status stage is IN, not OUT like other ep0in transfers. 1828 */ 1829 if (len > length) 1830 len = realworld ? 1 : 0; 1831 } 1832 1833 if (retval < 0) 1834 ERROR(dev, "ctrl_out %s failed, code %d, count %d\n", 1835 what, retval, i); 1836 1837 kfree(buf - offset); 1838 return retval; 1839 } 1840 1841 /*-------------------------------------------------------------------------*/ 1842 1843 /* ISO/BULK tests ... mimics common usage 1844 * - buffer length is split into N packets (mostly maxpacket sized) 1845 * - multi-buffers according to sglen 1846 */ 1847 1848 struct transfer_context { 1849 unsigned count; 1850 unsigned pending; 1851 spinlock_t lock; 1852 struct completion done; 1853 int submit_error; 1854 unsigned long errors; 1855 unsigned long packet_count; 1856 struct usbtest_dev *dev; 1857 bool is_iso; 1858 }; 1859 1860 static void complicated_callback(struct urb *urb) 1861 { 1862 struct transfer_context *ctx = urb->context; 1863 1864 spin_lock(&ctx->lock); 1865 ctx->count--; 1866 1867 ctx->packet_count += urb->number_of_packets; 1868 if (urb->error_count > 0) 1869 ctx->errors += urb->error_count; 1870 else if (urb->status != 0) 1871 ctx->errors += (ctx->is_iso ? 
urb->number_of_packets : 1); 1872 else if (urb->actual_length != urb->transfer_buffer_length) 1873 ctx->errors++; 1874 else if (check_guard_bytes(ctx->dev, urb) != 0) 1875 ctx->errors++; 1876 1877 if (urb->status == 0 && ctx->count > (ctx->pending - 1) 1878 && !ctx->submit_error) { 1879 int status = usb_submit_urb(urb, GFP_ATOMIC); 1880 switch (status) { 1881 case 0: 1882 goto done; 1883 default: 1884 dev_err(&ctx->dev->intf->dev, 1885 "resubmit err %d\n", 1886 status); 1887 /* FALLTHROUGH */ 1888 case -ENODEV: /* disconnected */ 1889 case -ESHUTDOWN: /* endpoint disabled */ 1890 ctx->submit_error = 1; 1891 break; 1892 } 1893 } 1894 1895 ctx->pending--; 1896 if (ctx->pending == 0) { 1897 if (ctx->errors) 1898 dev_err(&ctx->dev->intf->dev, 1899 "during the test, %lu errors out of %lu\n", 1900 ctx->errors, ctx->packet_count); 1901 complete(&ctx->done); 1902 } 1903 done: 1904 spin_unlock(&ctx->lock); 1905 } 1906 1907 static struct urb *iso_alloc_urb( 1908 struct usb_device *udev, 1909 int pipe, 1910 struct usb_endpoint_descriptor *desc, 1911 long bytes, 1912 unsigned offset 1913 ) 1914 { 1915 struct urb *urb; 1916 unsigned i, maxp, packets; 1917 1918 if (bytes < 0 || !desc) 1919 return NULL; 1920 maxp = 0x7ff & usb_endpoint_maxp(desc); 1921 maxp *= usb_endpoint_maxp_mult(desc); 1922 packets = DIV_ROUND_UP(bytes, maxp); 1923 1924 urb = usb_alloc_urb(packets, GFP_KERNEL); 1925 if (!urb) 1926 return urb; 1927 urb->dev = udev; 1928 urb->pipe = pipe; 1929 1930 urb->number_of_packets = packets; 1931 urb->transfer_buffer_length = bytes; 1932 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset, 1933 GFP_KERNEL, 1934 &urb->transfer_dma); 1935 if (!urb->transfer_buffer) { 1936 usb_free_urb(urb); 1937 return NULL; 1938 } 1939 if (offset) { 1940 memset(urb->transfer_buffer, GUARD_BYTE, offset); 1941 urb->transfer_buffer += offset; 1942 urb->transfer_dma += offset; 1943 } 1944 /* For inbound transfers use guard byte so that test fails if 1945 data not correctly copied */ 1946 memset(urb->transfer_buffer, 1947 usb_pipein(urb->pipe) ? 
GUARD_BYTE : 0, 1948 bytes); 1949 1950 for (i = 0; i < packets; i++) { 1951 /* here, only the last packet will be short */ 1952 urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp); 1953 bytes -= urb->iso_frame_desc[i].length; 1954 1955 urb->iso_frame_desc[i].offset = maxp * i; 1956 } 1957 1958 urb->complete = complicated_callback; 1959 /* urb->context = SET BY CALLER */ 1960 urb->interval = 1 << (desc->bInterval - 1); 1961 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; 1962 return urb; 1963 } 1964 1965 static int 1966 test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, 1967 int pipe, struct usb_endpoint_descriptor *desc, unsigned offset) 1968 { 1969 struct transfer_context context; 1970 struct usb_device *udev; 1971 unsigned i; 1972 unsigned long packets = 0; 1973 int status = 0; 1974 struct urb *urbs[param->sglen]; 1975 1976 if (!param->sglen || param->iterations > UINT_MAX / param->sglen) 1977 return -EINVAL; 1978 1979 memset(&context, 0, sizeof(context)); 1980 context.count = param->iterations * param->sglen; 1981 context.dev = dev; 1982 context.is_iso = !!desc; 1983 init_completion(&context.done); 1984 spin_lock_init(&context.lock); 1985 1986 udev = testdev_to_usbdev(dev); 1987 1988 for (i = 0; i < param->sglen; i++) { 1989 if (context.is_iso) 1990 urbs[i] = iso_alloc_urb(udev, pipe, desc, 1991 param->length, offset); 1992 else 1993 urbs[i] = complicated_alloc_urb(udev, pipe, 1994 param->length, 0); 1995 1996 if (!urbs[i]) { 1997 status = -ENOMEM; 1998 goto fail; 1999 } 2000 packets += urbs[i]->number_of_packets; 2001 urbs[i]->context = &context; 2002 } 2003 packets *= param->iterations; 2004 2005 if (context.is_iso) { 2006 dev_info(&dev->intf->dev, 2007 "iso period %d %sframes, wMaxPacket %d, transactions: %d\n", 2008 1 << (desc->bInterval - 1), 2009 (udev->speed == USB_SPEED_HIGH) ? "micro" : "", 2010 usb_endpoint_maxp(desc), 2011 usb_endpoint_maxp_mult(desc)); 2012 2013 dev_info(&dev->intf->dev, 2014 "total %lu msec (%lu packets)\n", 2015 (packets * (1 << (desc->bInterval - 1))) 2016 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1), 2017 packets); 2018 } 2019 2020 spin_lock_irq(&context.lock); 2021 for (i = 0; i < param->sglen; i++) { 2022 ++context.pending; 2023 status = usb_submit_urb(urbs[i], GFP_ATOMIC); 2024 if (status < 0) { 2025 ERROR(dev, "submit iso[%d], error %d\n", i, status); 2026 if (i == 0) { 2027 spin_unlock_irq(&context.lock); 2028 goto fail; 2029 } 2030 2031 simple_free_urb(urbs[i]); 2032 urbs[i] = NULL; 2033 context.pending--; 2034 context.submit_error = 1; 2035 break; 2036 } 2037 } 2038 spin_unlock_irq(&context.lock); 2039 2040 wait_for_completion(&context.done); 2041 2042 for (i = 0; i < param->sglen; i++) { 2043 if (urbs[i]) 2044 simple_free_urb(urbs[i]); 2045 } 2046 /* 2047 * Isochronous transfers are expected to fail sometimes. As an 2048 * arbitrary limit, we will report an error if any submissions 2049 * fail or if the transfer failure rate is > 10%. 2050 */ 2051 if (status != 0) 2052 ; 2053 else if (context.submit_error) 2054 status = -EACCES; 2055 else if (context.errors > 2056 (context.is_iso ? 
context.packet_count / 10 : 0)) 2057 status = -EIO; 2058 return status; 2059 2060 fail: 2061 for (i = 0; i < param->sglen; i++) { 2062 if (urbs[i]) 2063 simple_free_urb(urbs[i]); 2064 } 2065 return status; 2066 } 2067 2068 static int test_unaligned_bulk( 2069 struct usbtest_dev *tdev, 2070 int pipe, 2071 unsigned length, 2072 int iterations, 2073 unsigned transfer_flags, 2074 const char *label) 2075 { 2076 int retval; 2077 struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev), 2078 pipe, length, transfer_flags, 1, 0, simple_callback); 2079 2080 if (!urb) 2081 return -ENOMEM; 2082 2083 retval = simple_io(tdev, urb, iterations, 0, 0, label); 2084 simple_free_urb(urb); 2085 return retval; 2086 } 2087 2088 /* Run tests. */ 2089 static int 2090 usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param) 2091 { 2092 struct usbtest_dev *dev = usb_get_intfdata(intf); 2093 struct usb_device *udev = testdev_to_usbdev(dev); 2094 struct urb *urb; 2095 struct scatterlist *sg; 2096 struct usb_sg_request req; 2097 unsigned i; 2098 int retval = -EOPNOTSUPP; 2099 2100 if (param->iterations <= 0) 2101 return -EINVAL; 2102 if (param->sglen > MAX_SGLEN) 2103 return -EINVAL; 2104 /* 2105 * Just a bunch of test cases that every HCD is expected to handle. 2106 * 2107 * Some may need specific firmware, though it'd be good to have 2108 * one firmware image to handle all the test cases. 2109 * 2110 * FIXME add more tests! cancel requests, verify the data, control 2111 * queueing, concurrent read+write threads, and so on. 2112 */ 2113 switch (param->test_num) { 2114 2115 case 0: 2116 dev_info(&intf->dev, "TEST 0: NOP\n"); 2117 retval = 0; 2118 break; 2119 2120 /* Simple non-queued bulk I/O tests */ 2121 case 1: 2122 if (dev->out_pipe == 0) 2123 break; 2124 dev_info(&intf->dev, 2125 "TEST 1: write %d bytes %u times\n", 2126 param->length, param->iterations); 2127 urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0); 2128 if (!urb) { 2129 retval = -ENOMEM; 2130 break; 2131 } 2132 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2133 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1"); 2134 simple_free_urb(urb); 2135 break; 2136 case 2: 2137 if (dev->in_pipe == 0) 2138 break; 2139 dev_info(&intf->dev, 2140 "TEST 2: read %d bytes %u times\n", 2141 param->length, param->iterations); 2142 urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0); 2143 if (!urb) { 2144 retval = -ENOMEM; 2145 break; 2146 } 2147 /* FIRMWARE: bulk source (maybe generates short writes) */ 2148 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2"); 2149 simple_free_urb(urb); 2150 break; 2151 case 3: 2152 if (dev->out_pipe == 0 || param->vary == 0) 2153 break; 2154 dev_info(&intf->dev, 2155 "TEST 3: write/%d 0..%d bytes %u times\n", 2156 param->vary, param->length, param->iterations); 2157 urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0); 2158 if (!urb) { 2159 retval = -ENOMEM; 2160 break; 2161 } 2162 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2163 retval = simple_io(dev, urb, param->iterations, param->vary, 2164 0, "test3"); 2165 simple_free_urb(urb); 2166 break; 2167 case 4: 2168 if (dev->in_pipe == 0 || param->vary == 0) 2169 break; 2170 dev_info(&intf->dev, 2171 "TEST 4: read/%d 0..%d bytes %u times\n", 2172 param->vary, param->length, param->iterations); 2173 urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0); 2174 if (!urb) { 2175 retval = -ENOMEM; 2176 break; 2177 } 2178 /* FIRMWARE: bulk source (maybe generates short writes) */ 2179 retval = 
simple_io(dev, urb, param->iterations, param->vary, 2180 0, "test4"); 2181 simple_free_urb(urb); 2182 break; 2183 2184 /* Queued bulk I/O tests */ 2185 case 5: 2186 if (dev->out_pipe == 0 || param->sglen == 0) 2187 break; 2188 dev_info(&intf->dev, 2189 "TEST 5: write %d sglists %d entries of %d bytes\n", 2190 param->iterations, 2191 param->sglen, param->length); 2192 sg = alloc_sglist(param->sglen, param->length, 2193 0, dev, dev->out_pipe); 2194 if (!sg) { 2195 retval = -ENOMEM; 2196 break; 2197 } 2198 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2199 retval = perform_sglist(dev, param->iterations, dev->out_pipe, 2200 &req, sg, param->sglen); 2201 free_sglist(sg, param->sglen); 2202 break; 2203 2204 case 6: 2205 if (dev->in_pipe == 0 || param->sglen == 0) 2206 break; 2207 dev_info(&intf->dev, 2208 "TEST 6: read %d sglists %d entries of %d bytes\n", 2209 param->iterations, 2210 param->sglen, param->length); 2211 sg = alloc_sglist(param->sglen, param->length, 2212 0, dev, dev->in_pipe); 2213 if (!sg) { 2214 retval = -ENOMEM; 2215 break; 2216 } 2217 /* FIRMWARE: bulk source (maybe generates short writes) */ 2218 retval = perform_sglist(dev, param->iterations, dev->in_pipe, 2219 &req, sg, param->sglen); 2220 free_sglist(sg, param->sglen); 2221 break; 2222 case 7: 2223 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0) 2224 break; 2225 dev_info(&intf->dev, 2226 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n", 2227 param->vary, param->iterations, 2228 param->sglen, param->length); 2229 sg = alloc_sglist(param->sglen, param->length, 2230 param->vary, dev, dev->out_pipe); 2231 if (!sg) { 2232 retval = -ENOMEM; 2233 break; 2234 } 2235 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2236 retval = perform_sglist(dev, param->iterations, dev->out_pipe, 2237 &req, sg, param->sglen); 2238 free_sglist(sg, param->sglen); 2239 break; 2240 case 8: 2241 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0) 2242 break; 2243 dev_info(&intf->dev, 2244 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n", 2245 param->vary, param->iterations, 2246 param->sglen, param->length); 2247 sg = alloc_sglist(param->sglen, param->length, 2248 param->vary, dev, dev->in_pipe); 2249 if (!sg) { 2250 retval = -ENOMEM; 2251 break; 2252 } 2253 /* FIRMWARE: bulk source (maybe generates short writes) */ 2254 retval = perform_sglist(dev, param->iterations, dev->in_pipe, 2255 &req, sg, param->sglen); 2256 free_sglist(sg, param->sglen); 2257 break; 2258 2259 /* non-queued sanity tests for control (chapter 9 subset) */ 2260 case 9: 2261 retval = 0; 2262 dev_info(&intf->dev, 2263 "TEST 9: ch9 (subset) control tests, %d times\n", 2264 param->iterations); 2265 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2266 retval = ch9_postconfig(dev); 2267 if (retval) 2268 dev_err(&intf->dev, "ch9 subset failed, " 2269 "iterations left %d\n", i); 2270 break; 2271 2272 /* queued control messaging */ 2273 case 10: 2274 retval = 0; 2275 dev_info(&intf->dev, 2276 "TEST 10: queue %d control calls, %d times\n", 2277 param->sglen, 2278 param->iterations); 2279 retval = test_ctrl_queue(dev, param); 2280 break; 2281 2282 /* simple non-queued unlinks (ring with one urb) */ 2283 case 11: 2284 if (dev->in_pipe == 0 || !param->length) 2285 break; 2286 retval = 0; 2287 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n", 2288 param->iterations, param->length); 2289 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2290 retval = unlink_simple(dev, dev->in_pipe, 2291 param->length); 2292 if 
(retval) 2293 dev_err(&intf->dev, "unlink reads failed %d, " 2294 "iterations left %d\n", retval, i); 2295 break; 2296 case 12: 2297 if (dev->out_pipe == 0 || !param->length) 2298 break; 2299 retval = 0; 2300 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n", 2301 param->iterations, param->length); 2302 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2303 retval = unlink_simple(dev, dev->out_pipe, 2304 param->length); 2305 if (retval) 2306 dev_err(&intf->dev, "unlink writes failed %d, " 2307 "iterations left %d\n", retval, i); 2308 break; 2309 2310 /* ep halt tests */ 2311 case 13: 2312 if (dev->out_pipe == 0 && dev->in_pipe == 0) 2313 break; 2314 retval = 0; 2315 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n", 2316 param->iterations); 2317 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2318 retval = halt_simple(dev); 2319 2320 if (retval) 2321 ERROR(dev, "halts failed, iterations left %d\n", i); 2322 break; 2323 2324 /* control write tests */ 2325 case 14: 2326 if (!dev->info->ctrl_out) 2327 break; 2328 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n", 2329 param->iterations, 2330 realworld ? 1 : 0, param->length, 2331 param->vary); 2332 retval = ctrl_out(dev, param->iterations, 2333 param->length, param->vary, 0); 2334 break; 2335 2336 /* iso write tests */ 2337 case 15: 2338 if (dev->out_iso_pipe == 0 || param->sglen == 0) 2339 break; 2340 dev_info(&intf->dev, 2341 "TEST 15: write %d iso, %d entries of %d bytes\n", 2342 param->iterations, 2343 param->sglen, param->length); 2344 /* FIRMWARE: iso sink */ 2345 retval = test_queue(dev, param, 2346 dev->out_iso_pipe, dev->iso_out, 0); 2347 break; 2348 2349 /* iso read tests */ 2350 case 16: 2351 if (dev->in_iso_pipe == 0 || param->sglen == 0) 2352 break; 2353 dev_info(&intf->dev, 2354 "TEST 16: read %d iso, %d entries of %d bytes\n", 2355 param->iterations, 2356 param->sglen, param->length); 2357 /* FIRMWARE: iso source */ 2358 retval = test_queue(dev, param, 2359 dev->in_iso_pipe, dev->iso_in, 0); 2360 break; 2361 2362 /* FIXME scatterlist cancel (needs helper thread) */ 2363 2364 /* Tests for bulk I/O using DMA mapping by core and odd address */ 2365 case 17: 2366 if (dev->out_pipe == 0) 2367 break; 2368 dev_info(&intf->dev, 2369 "TEST 17: write odd addr %d bytes %u times core map\n", 2370 param->length, param->iterations); 2371 2372 retval = test_unaligned_bulk( 2373 dev, dev->out_pipe, 2374 param->length, param->iterations, 2375 0, "test17"); 2376 break; 2377 2378 case 18: 2379 if (dev->in_pipe == 0) 2380 break; 2381 dev_info(&intf->dev, 2382 "TEST 18: read odd addr %d bytes %u times core map\n", 2383 param->length, param->iterations); 2384 2385 retval = test_unaligned_bulk( 2386 dev, dev->in_pipe, 2387 param->length, param->iterations, 2388 0, "test18"); 2389 break; 2390 2391 /* Tests for bulk I/O using premapped coherent buffer and odd address */ 2392 case 19: 2393 if (dev->out_pipe == 0) 2394 break; 2395 dev_info(&intf->dev, 2396 "TEST 19: write odd addr %d bytes %u times premapped\n", 2397 param->length, param->iterations); 2398 2399 retval = test_unaligned_bulk( 2400 dev, dev->out_pipe, 2401 param->length, param->iterations, 2402 URB_NO_TRANSFER_DMA_MAP, "test19"); 2403 break; 2404 2405 case 20: 2406 if (dev->in_pipe == 0) 2407 break; 2408 dev_info(&intf->dev, 2409 "TEST 20: read odd addr %d bytes %u times premapped\n", 2410 param->length, param->iterations); 2411 2412 retval = test_unaligned_bulk( 2413 dev, dev->in_pipe, 2414 param->length, param->iterations, 2415 
URB_NO_TRANSFER_DMA_MAP, "test20"); 2416 break; 2417 2418 /* control write tests with unaligned buffer */ 2419 case 21: 2420 if (!dev->info->ctrl_out) 2421 break; 2422 dev_info(&intf->dev, 2423 "TEST 21: %d ep0out odd addr, %d..%d vary %d\n", 2424 param->iterations, 2425 realworld ? 1 : 0, param->length, 2426 param->vary); 2427 retval = ctrl_out(dev, param->iterations, 2428 param->length, param->vary, 1); 2429 break; 2430 2431 /* unaligned iso tests */ 2432 case 22: 2433 if (dev->out_iso_pipe == 0 || param->sglen == 0) 2434 break; 2435 dev_info(&intf->dev, 2436 "TEST 22: write %d iso odd, %d entries of %d bytes\n", 2437 param->iterations, 2438 param->sglen, param->length); 2439 retval = test_queue(dev, param, 2440 dev->out_iso_pipe, dev->iso_out, 1); 2441 break; 2442 2443 case 23: 2444 if (dev->in_iso_pipe == 0 || param->sglen == 0) 2445 break; 2446 dev_info(&intf->dev, 2447 "TEST 23: read %d iso odd, %d entries of %d bytes\n", 2448 param->iterations, 2449 param->sglen, param->length); 2450 retval = test_queue(dev, param, 2451 dev->in_iso_pipe, dev->iso_in, 1); 2452 break; 2453 2454 /* unlink URBs from a bulk-OUT queue */ 2455 case 24: 2456 if (dev->out_pipe == 0 || !param->length || param->sglen < 4) 2457 break; 2458 retval = 0; 2459 dev_info(&intf->dev, "TEST 24: unlink from %d queues of " 2460 "%d %d-byte writes\n", 2461 param->iterations, param->sglen, param->length); 2462 for (i = param->iterations; retval == 0 && i > 0; --i) { 2463 retval = unlink_queued(dev, dev->out_pipe, 2464 param->sglen, param->length); 2465 if (retval) { 2466 dev_err(&intf->dev, 2467 "unlink queued writes failed %d, " 2468 "iterations left %d\n", retval, i); 2469 break; 2470 } 2471 } 2472 break; 2473 2474 /* Simple non-queued interrupt I/O tests */ 2475 case 25: 2476 if (dev->out_int_pipe == 0) 2477 break; 2478 dev_info(&intf->dev, 2479 "TEST 25: write %d bytes %u times\n", 2480 param->length, param->iterations); 2481 urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length, 2482 dev->int_out->bInterval); 2483 if (!urb) { 2484 retval = -ENOMEM; 2485 break; 2486 } 2487 /* FIRMWARE: interrupt sink (maybe accepts short writes) */ 2488 retval = simple_io(dev, urb, param->iterations, 0, 0, "test25"); 2489 simple_free_urb(urb); 2490 break; 2491 case 26: 2492 if (dev->in_int_pipe == 0) 2493 break; 2494 dev_info(&intf->dev, 2495 "TEST 26: read %d bytes %u times\n", 2496 param->length, param->iterations); 2497 urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length, 2498 dev->int_in->bInterval); 2499 if (!urb) { 2500 retval = -ENOMEM; 2501 break; 2502 } 2503 /* FIRMWARE: interrupt source (maybe generates short writes) */ 2504 retval = simple_io(dev, urb, param->iterations, 0, 0, "test26"); 2505 simple_free_urb(urb); 2506 break; 2507 case 27: 2508 /* We do performance test, so ignore data compare */ 2509 if (dev->out_pipe == 0 || param->sglen == 0 || pattern != 0) 2510 break; 2511 dev_info(&intf->dev, 2512 "TEST 27: bulk write %dMbytes\n", (param->iterations * 2513 param->sglen * param->length) / (1024 * 1024)); 2514 retval = test_queue(dev, param, 2515 dev->out_pipe, NULL, 0); 2516 break; 2517 case 28: 2518 if (dev->in_pipe == 0 || param->sglen == 0 || pattern != 0) 2519 break; 2520 dev_info(&intf->dev, 2521 "TEST 28: bulk read %dMbytes\n", (param->iterations * 2522 param->sglen * param->length) / (1024 * 1024)); 2523 retval = test_queue(dev, param, 2524 dev->in_pipe, NULL, 0); 2525 break; 2526 } 2527 return retval; 2528 } 2529 2530 /*-------------------------------------------------------------------------*/ 
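
/*
 * Illustrative sketch only, not part of this driver: one way user space
 * can reach usbtest_do_ioctl() above is to wrap the request in usbfs's
 * USBDEVFS_IOCTL, which usbfs forwards to the bound interface driver.
 * The helper name and device path below are hypothetical, and user space
 * needs its own copy of struct usbtest_param_32 / USBTEST_REQUEST_32;
 * the in-tree testusb tool (tools/usb/testusb.c) does essentially this.
 */
#if 0	/* user-space example */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

static int run_usbtest(const char *devpath, int ifnum,
		struct usbtest_param_32 *param)
{
	struct usbdevfs_ioctl wrapper;
	int fd, ret;

	fd = open(devpath, O_RDWR);	/* e.g. /dev/bus/usb/001/002 */
	if (fd < 0)
		return -1;

	wrapper.ifno = ifnum;		/* interface bound to usbtest */
	wrapper.ioctl_code = USBTEST_REQUEST_32;
	wrapper.data = param;		/* in: test spec; out: duration */

	/* usbfs hands this to the driver's ->unlocked_ioctl() */
	ret = ioctl(fd, USBDEVFS_IOCTL, &wrapper);
	close(fd);
	return ret;
}
#endif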

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test. So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests. Use them along with
 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
 * video capture, and so on. Run different tests at different times, in
 * different sequences. Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion. But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths. Notably, if you disconnect
 * the device-under-test, hub_wq will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect(). To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */

static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
	struct usbtest_dev *dev = usb_get_intfdata(intf);
	struct usbtest_param_64 *param_64 = buf;
	struct usbtest_param_32 temp;
	struct usbtest_param_32 *param_32 = buf;
	struct timespec64 start;
	struct timespec64 end;
	struct timespec64 duration;
	int retval = -EOPNOTSUPP;

	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */

	pattern = mod_pattern;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;

	/* FIXME: What if a system sleep starts while a test is running? */

	/* some devices, like ez-usb default devices, need a non-default
	 * altsetting to have any active endpoints. some tests change
	 * altsettings; force a default so most tests don't need to check.
2578 */ 2579 if (dev->info->alt >= 0) { 2580 if (intf->altsetting->desc.bInterfaceNumber) { 2581 retval = -ENODEV; 2582 goto free_mutex; 2583 } 2584 retval = set_altsetting(dev, dev->info->alt); 2585 if (retval) { 2586 dev_err(&intf->dev, 2587 "set altsetting to %d failed, %d\n", 2588 dev->info->alt, retval); 2589 goto free_mutex; 2590 } 2591 } 2592 2593 switch (code) { 2594 case USBTEST_REQUEST_64: 2595 temp.test_num = param_64->test_num; 2596 temp.iterations = param_64->iterations; 2597 temp.length = param_64->length; 2598 temp.sglen = param_64->sglen; 2599 temp.vary = param_64->vary; 2600 param_32 = &temp; 2601 break; 2602 2603 case USBTEST_REQUEST_32: 2604 break; 2605 2606 default: 2607 retval = -EOPNOTSUPP; 2608 goto free_mutex; 2609 } 2610 2611 ktime_get_ts64(&start); 2612 2613 retval = usbtest_do_ioctl(intf, param_32); 2614 if (retval < 0) 2615 goto free_mutex; 2616 2617 ktime_get_ts64(&end); 2618 2619 duration = timespec64_sub(end, start); 2620 2621 temp.duration_sec = duration.tv_sec; 2622 temp.duration_usec = duration.tv_nsec/NSEC_PER_USEC; 2623 2624 switch (code) { 2625 case USBTEST_REQUEST_32: 2626 param_32->duration_sec = temp.duration_sec; 2627 param_32->duration_usec = temp.duration_usec; 2628 break; 2629 2630 case USBTEST_REQUEST_64: 2631 param_64->duration_sec = temp.duration_sec; 2632 param_64->duration_usec = temp.duration_usec; 2633 break; 2634 } 2635 2636 free_mutex: 2637 mutex_unlock(&dev->lock); 2638 return retval; 2639 } 2640 2641 /*-------------------------------------------------------------------------*/ 2642 2643 static unsigned force_interrupt; 2644 module_param(force_interrupt, uint, 0); 2645 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt"); 2646 2647 #ifdef GENERIC 2648 static unsigned short vendor; 2649 module_param(vendor, ushort, 0); 2650 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)"); 2651 2652 static unsigned short product; 2653 module_param(product, ushort, 0); 2654 MODULE_PARM_DESC(product, "product code (from vendor)"); 2655 #endif 2656 2657 static int 2658 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id) 2659 { 2660 struct usb_device *udev; 2661 struct usbtest_dev *dev; 2662 struct usbtest_info *info; 2663 char *rtest, *wtest; 2664 char *irtest, *iwtest; 2665 char *intrtest, *intwtest; 2666 2667 udev = interface_to_usbdev(intf); 2668 2669 #ifdef GENERIC 2670 /* specify devices by module parameters? */ 2671 if (id->match_flags == 0) { 2672 /* vendor match required, product match optional */ 2673 if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor) 2674 return -ENODEV; 2675 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product) 2676 return -ENODEV; 2677 dev_info(&intf->dev, "matched module params, " 2678 "vend=0x%04x prod=0x%04x\n", 2679 le16_to_cpu(udev->descriptor.idVendor), 2680 le16_to_cpu(udev->descriptor.idProduct)); 2681 } 2682 #endif 2683 2684 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2685 if (!dev) 2686 return -ENOMEM; 2687 info = (struct usbtest_info *) id->driver_info; 2688 dev->info = info; 2689 mutex_init(&dev->lock); 2690 2691 dev->intf = intf; 2692 2693 /* cacheline-aligned scratch for i/o */ 2694 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL); 2695 if (dev->buf == NULL) { 2696 kfree(dev); 2697 return -ENOMEM; 2698 } 2699 2700 /* NOTE this doesn't yet test the handful of difference that are 2701 * visible with high speed interrupts: bigger maxpacket (1K) and 2702 * "high bandwidth" modes (up to 3 packets/uframe). 
2703 */ 2704 rtest = wtest = ""; 2705 irtest = iwtest = ""; 2706 intrtest = intwtest = ""; 2707 if (force_interrupt || udev->speed == USB_SPEED_LOW) { 2708 if (info->ep_in) { 2709 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in); 2710 rtest = " intr-in"; 2711 } 2712 if (info->ep_out) { 2713 dev->out_pipe = usb_sndintpipe(udev, info->ep_out); 2714 wtest = " intr-out"; 2715 } 2716 } else { 2717 if (override_alt >= 0 || info->autoconf) { 2718 int status; 2719 2720 status = get_endpoints(dev, intf); 2721 if (status < 0) { 2722 WARNING(dev, "couldn't get endpoints, %d\n", 2723 status); 2724 kfree(dev->buf); 2725 kfree(dev); 2726 return status; 2727 } 2728 /* may find bulk or ISO pipes */ 2729 } else { 2730 if (info->ep_in) 2731 dev->in_pipe = usb_rcvbulkpipe(udev, 2732 info->ep_in); 2733 if (info->ep_out) 2734 dev->out_pipe = usb_sndbulkpipe(udev, 2735 info->ep_out); 2736 } 2737 if (dev->in_pipe) 2738 rtest = " bulk-in"; 2739 if (dev->out_pipe) 2740 wtest = " bulk-out"; 2741 if (dev->in_iso_pipe) 2742 irtest = " iso-in"; 2743 if (dev->out_iso_pipe) 2744 iwtest = " iso-out"; 2745 if (dev->in_int_pipe) 2746 intrtest = " int-in"; 2747 if (dev->out_int_pipe) 2748 intwtest = " int-out"; 2749 } 2750 2751 usb_set_intfdata(intf, dev); 2752 dev_info(&intf->dev, "%s\n", info->name); 2753 dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n", 2754 usb_speed_string(udev->speed), 2755 info->ctrl_out ? " in/out" : "", 2756 rtest, wtest, 2757 irtest, iwtest, 2758 intrtest, intwtest, 2759 info->alt >= 0 ? " (+alt)" : ""); 2760 return 0; 2761 } 2762 2763 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message) 2764 { 2765 return 0; 2766 } 2767 2768 static int usbtest_resume(struct usb_interface *intf) 2769 { 2770 return 0; 2771 } 2772 2773 2774 static void usbtest_disconnect(struct usb_interface *intf) 2775 { 2776 struct usbtest_dev *dev = usb_get_intfdata(intf); 2777 2778 usb_set_intfdata(intf, NULL); 2779 dev_dbg(&intf->dev, "disconnect\n"); 2780 kfree(dev); 2781 } 2782 2783 /* Basic testing only needs a device that can source or sink bulk traffic. 2784 * Any device can test control transfers (default with GENERIC binding). 2785 * 2786 * Several entries work with the default EP0 implementation that's built 2787 * into EZ-USB chips. There's a default vendor ID which can be overridden 2788 * by (very) small config EEPROMS, but otherwise all these devices act 2789 * identically until firmware is loaded: only EP0 works. It turns out 2790 * to be easy to make other endpoints work, without modifying that EP0 2791 * behavior. For now, we expect that kind of firmware. 2792 */ 2793 2794 /* an21xx or fx versions of ez-usb */ 2795 static struct usbtest_info ez1_info = { 2796 .name = "EZ-USB device", 2797 .ep_in = 2, 2798 .ep_out = 2, 2799 .alt = 1, 2800 }; 2801 2802 /* fx2 version of ez-usb */ 2803 static struct usbtest_info ez2_info = { 2804 .name = "FX2 device", 2805 .ep_in = 6, 2806 .ep_out = 2, 2807 .alt = 1, 2808 }; 2809 2810 /* ezusb family device with dedicated usb test firmware, 2811 */ 2812 static struct usbtest_info fw_info = { 2813 .name = "usb test device", 2814 .ep_in = 2, 2815 .ep_out = 2, 2816 .alt = 1, 2817 .autoconf = 1, /* iso and ctrl_out need autoconf */ 2818 .ctrl_out = 1, 2819 .iso = 1, /* iso_ep's are #8 in/out */ 2820 }; 2821 2822 /* peripheral running Linux and 'zero.c' test firmware, or 2823 * its user-mode cousin. different versions of this use 2824 * different hardware with the same vendor/product codes. 2825 * host side MUST rely on the endpoint descriptors. 
2826 */ 2827 static struct usbtest_info gz_info = { 2828 .name = "Linux gadget zero", 2829 .autoconf = 1, 2830 .ctrl_out = 1, 2831 .iso = 1, 2832 .intr = 1, 2833 .alt = 0, 2834 }; 2835 2836 static struct usbtest_info um_info = { 2837 .name = "Linux user mode test driver", 2838 .autoconf = 1, 2839 .alt = -1, 2840 }; 2841 2842 static struct usbtest_info um2_info = { 2843 .name = "Linux user mode ISO test driver", 2844 .autoconf = 1, 2845 .iso = 1, 2846 .alt = -1, 2847 }; 2848 2849 #ifdef IBOT2 2850 /* this is a nice source of high speed bulk data; 2851 * uses an FX2, with firmware provided in the device 2852 */ 2853 static struct usbtest_info ibot2_info = { 2854 .name = "iBOT2 webcam", 2855 .ep_in = 2, 2856 .alt = -1, 2857 }; 2858 #endif 2859 2860 #ifdef GENERIC 2861 /* we can use any device to test control traffic */ 2862 static struct usbtest_info generic_info = { 2863 .name = "Generic USB device", 2864 .alt = -1, 2865 }; 2866 #endif 2867 2868 2869 static const struct usb_device_id id_table[] = { 2870 2871 /*-------------------------------------------------------------*/ 2872 2873 /* EZ-USB devices which download firmware to replace (or in our 2874 * case augment) the default device implementation. 2875 */ 2876 2877 /* generic EZ-USB FX controller */ 2878 { USB_DEVICE(0x0547, 0x2235), 2879 .driver_info = (unsigned long) &ez1_info, 2880 }, 2881 2882 /* CY3671 development board with EZ-USB FX */ 2883 { USB_DEVICE(0x0547, 0x0080), 2884 .driver_info = (unsigned long) &ez1_info, 2885 }, 2886 2887 /* generic EZ-USB FX2 controller (or development board) */ 2888 { USB_DEVICE(0x04b4, 0x8613), 2889 .driver_info = (unsigned long) &ez2_info, 2890 }, 2891 2892 /* re-enumerated usb test device firmware */ 2893 { USB_DEVICE(0xfff0, 0xfff0), 2894 .driver_info = (unsigned long) &fw_info, 2895 }, 2896 2897 /* "Gadget Zero" firmware runs under Linux */ 2898 { USB_DEVICE(0x0525, 0xa4a0), 2899 .driver_info = (unsigned long) &gz_info, 2900 }, 2901 2902 /* so does a user-mode variant */ 2903 { USB_DEVICE(0x0525, 0xa4a4), 2904 .driver_info = (unsigned long) &um_info, 2905 }, 2906 2907 /* ... and a user-mode variant that talks iso */ 2908 { USB_DEVICE(0x0525, 0xa4a3), 2909 .driver_info = (unsigned long) &um2_info, 2910 }, 2911 2912 #ifdef KEYSPAN_19Qi 2913 /* Keyspan 19qi uses an21xx (original EZ-USB) */ 2914 /* this does not coexist with the real Keyspan 19qi driver! */ 2915 { USB_DEVICE(0x06cd, 0x010b), 2916 .driver_info = (unsigned long) &ez1_info, 2917 }, 2918 #endif 2919 2920 /*-------------------------------------------------------------*/ 2921 2922 #ifdef IBOT2 2923 /* iBOT2 makes a nice source of high speed bulk-in data */ 2924 /* this does not coexist with a real iBOT2 driver! 
 */
	{ USB_DEVICE(0x0b62, 0x0059),
		.driver_info = (unsigned long) &ibot2_info,
	},
#endif

	/*-------------------------------------------------------------*/

#ifdef GENERIC
	/* module params can specify devices to use for control tests */
	{ .driver_info = (unsigned long) &generic_info, },
#endif

	/*-------------------------------------------------------------*/

	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_driver usbtest_driver = {
	.name = "usbtest",
	.id_table = id_table,
	.probe = usbtest_probe,
	.unlocked_ioctl = usbtest_ioctl,
	.disconnect = usbtest_disconnect,
	.suspend = usbtest_suspend,
	.resume = usbtest_resume,
};

/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");
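
/*
 * Illustrative sketch only, not part of this driver: filling in a
 * usbtest_param_32 for a single run.  The values are arbitrary examples
 * and correspond to "TEST 5" in usbtest_do_ioctl() (queued bulk-OUT
 * scatterlists); run_usbtest() is the hypothetical user-space helper
 * sketched above, before usbtest_ioctl().
 */
#if 0	/* user-space example */
#include <stdio.h>

static int example_test5(const char *devpath)
{
	struct usbtest_param_32 param = {
		.test_num   = 5,	/* queued bulk-OUT scatterlists */
		.iterations = 1000,	/* how many sglists to submit */
		.length     = 512,	/* bytes per scatterlist entry */
		.sglen      = 32,	/* entries per scatterlist */
	};

	if (run_usbtest(devpath, 0, &param) < 0)
		return -1;

	/* duration_sec/duration_usec are filled in by usbtest_ioctl() */
	printf("TEST 5 took %d.%06d seconds\n",
			(int) param.duration_sec, (int) param.duration_usec);
	return 0;
}
#endif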