1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/kernel.h> 3 #include <linux/errno.h> 4 #include <linux/init.h> 5 #include <linux/slab.h> 6 #include <linux/mm.h> 7 #include <linux/module.h> 8 #include <linux/moduleparam.h> 9 #include <linux/scatterlist.h> 10 #include <linux/mutex.h> 11 #include <linux/timer.h> 12 #include <linux/usb.h> 13 14 #define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */ 15 16 /*-------------------------------------------------------------------------*/ 17 18 static int override_alt = -1; 19 module_param_named(alt, override_alt, int, 0644); 20 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection"); 21 static void complicated_callback(struct urb *urb); 22 23 /*-------------------------------------------------------------------------*/ 24 25 /* FIXME make these public somewhere; usbdevfs.h? */ 26 27 /* Parameter for usbtest driver. */ 28 struct usbtest_param_32 { 29 /* inputs */ 30 __u32 test_num; /* 0..(TEST_CASES-1) */ 31 __u32 iterations; 32 __u32 length; 33 __u32 vary; 34 __u32 sglen; 35 36 /* outputs */ 37 __s32 duration_sec; 38 __s32 duration_usec; 39 }; 40 41 /* 42 * Compat parameter to the usbtest driver. 43 * This supports older user space binaries compiled with 64 bit compiler. 44 */ 45 struct usbtest_param_64 { 46 /* inputs */ 47 __u32 test_num; /* 0..(TEST_CASES-1) */ 48 __u32 iterations; 49 __u32 length; 50 __u32 vary; 51 __u32 sglen; 52 53 /* outputs */ 54 __s64 duration_sec; 55 __s64 duration_usec; 56 }; 57 58 /* IOCTL interface to the driver. */ 59 #define USBTEST_REQUEST_32 _IOWR('U', 100, struct usbtest_param_32) 60 /* COMPAT IOCTL interface to the driver. */ 61 #define USBTEST_REQUEST_64 _IOWR('U', 100, struct usbtest_param_64) 62 63 /*-------------------------------------------------------------------------*/ 64 65 #define GENERIC /* let probe() bind using module params */ 66 67 /* Some devices that can be used for testing will have "real" drivers. 68 * Entries for those need to be enabled here by hand, after disabling 69 * that "real" driver. 70 */ 71 //#define IBOT2 /* grab iBOT2 webcams */ 72 //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */ 73 74 /*-------------------------------------------------------------------------*/ 75 76 struct usbtest_info { 77 const char *name; 78 u8 ep_in; /* bulk/intr source */ 79 u8 ep_out; /* bulk/intr sink */ 80 unsigned autoconf:1; 81 unsigned ctrl_out:1; 82 unsigned iso:1; /* try iso in/out */ 83 unsigned intr:1; /* try interrupt in/out */ 84 int alt; 85 }; 86 87 /* this is accessed only through usbfs ioctl calls. 88 * one ioctl to issue a test ... one lock per device. 89 * tests create other threads if they need them. 90 * urbs and buffers are allocated dynamically, 91 * and data generated deterministically. 92 */ 93 struct usbtest_dev { 94 struct usb_interface *intf; 95 struct usbtest_info *info; 96 int in_pipe; 97 int out_pipe; 98 int in_iso_pipe; 99 int out_iso_pipe; 100 int in_int_pipe; 101 int out_int_pipe; 102 struct usb_endpoint_descriptor *iso_in, *iso_out; 103 struct usb_endpoint_descriptor *int_in, *int_out; 104 struct mutex lock; 105 106 #define TBUF_SIZE 256 107 u8 *buf; 108 }; 109 110 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test) 111 { 112 return interface_to_usbdev(test->intf); 113 } 114 115 /* set up all urbs so they can be used with either bulk or interrupt */ 116 #define INTERRUPT_RATE 1 /* msec/transfer */ 117 118 #define ERROR(tdev, fmt, args...) \ 119 dev_err(&(tdev)->intf->dev , fmt , ## args) 120 #define WARNING(tdev, fmt, args...) \ 121 dev_warn(&(tdev)->intf->dev , fmt , ## args) 122 123 #define GUARD_BYTE 0xA5 124 #define MAX_SGLEN 128 125 126 /*-------------------------------------------------------------------------*/ 127 128 static inline void endpoint_update(int edi, 129 struct usb_host_endpoint **in, 130 struct usb_host_endpoint **out, 131 struct usb_host_endpoint *e) 132 { 133 if (edi) { 134 if (!*in) 135 *in = e; 136 } else { 137 if (!*out) 138 *out = e; 139 } 140 } 141 142 static int 143 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf) 144 { 145 int tmp; 146 struct usb_host_interface *alt; 147 struct usb_host_endpoint *in, *out; 148 struct usb_host_endpoint *iso_in, *iso_out; 149 struct usb_host_endpoint *int_in, *int_out; 150 struct usb_device *udev; 151 152 for (tmp = 0; tmp < intf->num_altsetting; tmp++) { 153 unsigned ep; 154 155 in = out = NULL; 156 iso_in = iso_out = NULL; 157 int_in = int_out = NULL; 158 alt = intf->altsetting + tmp; 159 160 if (override_alt >= 0 && 161 override_alt != alt->desc.bAlternateSetting) 162 continue; 163 164 /* take the first altsetting with in-bulk + out-bulk; 165 * ignore other endpoints and altsettings. 166 */ 167 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { 168 struct usb_host_endpoint *e; 169 int edi; 170 171 e = alt->endpoint + ep; 172 edi = usb_endpoint_dir_in(&e->desc); 173 174 switch (usb_endpoint_type(&e->desc)) { 175 case USB_ENDPOINT_XFER_BULK: 176 endpoint_update(edi, &in, &out, e); 177 continue; 178 case USB_ENDPOINT_XFER_INT: 179 if (dev->info->intr) 180 endpoint_update(edi, &int_in, &int_out, e); 181 continue; 182 case USB_ENDPOINT_XFER_ISOC: 183 if (dev->info->iso) 184 endpoint_update(edi, &iso_in, &iso_out, e); 185 fallthrough; 186 default: 187 continue; 188 } 189 } 190 if ((in && out) || iso_in || iso_out || int_in || int_out) 191 goto found; 192 } 193 return -EINVAL; 194 195 found: 196 udev = testdev_to_usbdev(dev); 197 dev->info->alt = alt->desc.bAlternateSetting; 198 if (alt->desc.bAlternateSetting != 0) { 199 tmp = usb_set_interface(udev, 200 alt->desc.bInterfaceNumber, 201 alt->desc.bAlternateSetting); 202 if (tmp < 0) 203 return tmp; 204 } 205 206 if (in) 207 dev->in_pipe = usb_rcvbulkpipe(udev, 208 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 209 if (out) 210 dev->out_pipe = usb_sndbulkpipe(udev, 211 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 212 213 if (iso_in) { 214 dev->iso_in = &iso_in->desc; 215 dev->in_iso_pipe = usb_rcvisocpipe(udev, 216 iso_in->desc.bEndpointAddress 217 & USB_ENDPOINT_NUMBER_MASK); 218 } 219 220 if (iso_out) { 221 dev->iso_out = &iso_out->desc; 222 dev->out_iso_pipe = usb_sndisocpipe(udev, 223 iso_out->desc.bEndpointAddress 224 & USB_ENDPOINT_NUMBER_MASK); 225 } 226 227 if (int_in) { 228 dev->int_in = &int_in->desc; 229 dev->in_int_pipe = usb_rcvintpipe(udev, 230 int_in->desc.bEndpointAddress 231 & USB_ENDPOINT_NUMBER_MASK); 232 } 233 234 if (int_out) { 235 dev->int_out = &int_out->desc; 236 dev->out_int_pipe = usb_sndintpipe(udev, 237 int_out->desc.bEndpointAddress 238 & USB_ENDPOINT_NUMBER_MASK); 239 } 240 return 0; 241 } 242 243 /*-------------------------------------------------------------------------*/ 244 245 /* Support for testing basic non-queued I/O streams. 246 * 247 * These just package urbs as requests that can be easily canceled. 248 * Each urb's data buffer is dynamically allocated; callers can fill 249 * them with non-zero test data (or test for it) when appropriate. 250 */ 251 252 static void simple_callback(struct urb *urb) 253 { 254 complete(urb->context); 255 } 256 257 static struct urb *usbtest_alloc_urb( 258 struct usb_device *udev, 259 int pipe, 260 unsigned long bytes, 261 unsigned transfer_flags, 262 unsigned offset, 263 u8 bInterval, 264 usb_complete_t complete_fn) 265 { 266 struct urb *urb; 267 268 urb = usb_alloc_urb(0, GFP_KERNEL); 269 if (!urb) 270 return urb; 271 272 if (bInterval) 273 usb_fill_int_urb(urb, udev, pipe, NULL, bytes, complete_fn, 274 NULL, bInterval); 275 else 276 usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, complete_fn, 277 NULL); 278 279 urb->interval = (udev->speed == USB_SPEED_HIGH) 280 ? (INTERRUPT_RATE << 3) 281 : INTERRUPT_RATE; 282 urb->transfer_flags = transfer_flags; 283 if (usb_pipein(pipe)) 284 urb->transfer_flags |= URB_SHORT_NOT_OK; 285 286 if ((bytes + offset) == 0) 287 return urb; 288 289 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 290 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset, 291 GFP_KERNEL, &urb->transfer_dma); 292 else 293 urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL); 294 295 if (!urb->transfer_buffer) { 296 usb_free_urb(urb); 297 return NULL; 298 } 299 300 /* To test unaligned transfers add an offset and fill the 301 unused memory with a guard value */ 302 if (offset) { 303 memset(urb->transfer_buffer, GUARD_BYTE, offset); 304 urb->transfer_buffer += offset; 305 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 306 urb->transfer_dma += offset; 307 } 308 309 /* For inbound transfers use guard byte so that test fails if 310 data not correctly copied */ 311 memset(urb->transfer_buffer, 312 usb_pipein(urb->pipe) ? GUARD_BYTE : 0, 313 bytes); 314 return urb; 315 } 316 317 static struct urb *simple_alloc_urb( 318 struct usb_device *udev, 319 int pipe, 320 unsigned long bytes, 321 u8 bInterval) 322 { 323 return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0, 324 bInterval, simple_callback); 325 } 326 327 static struct urb *complicated_alloc_urb( 328 struct usb_device *udev, 329 int pipe, 330 unsigned long bytes, 331 u8 bInterval) 332 { 333 return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0, 334 bInterval, complicated_callback); 335 } 336 337 static unsigned pattern; 338 static unsigned mod_pattern; 339 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR); 340 MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)"); 341 342 static unsigned get_maxpacket(struct usb_device *udev, int pipe) 343 { 344 struct usb_host_endpoint *ep; 345 346 ep = usb_pipe_endpoint(udev, pipe); 347 return le16_to_cpup(&ep->desc.wMaxPacketSize); 348 } 349 350 static int ss_isoc_get_packet_num(struct usb_device *udev, int pipe) 351 { 352 struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe); 353 354 return USB_SS_MULT(ep->ss_ep_comp.bmAttributes) 355 * (1 + ep->ss_ep_comp.bMaxBurst); 356 } 357 358 static void simple_fill_buf(struct urb *urb) 359 { 360 unsigned i; 361 u8 *buf = urb->transfer_buffer; 362 unsigned len = urb->transfer_buffer_length; 363 unsigned maxpacket; 364 365 switch (pattern) { 366 default: 367 fallthrough; 368 case 0: 369 memset(buf, 0, len); 370 break; 371 case 1: /* mod63 */ 372 maxpacket = get_maxpacket(urb->dev, urb->pipe); 373 for (i = 0; i < len; i++) 374 *buf++ = (u8) ((i % maxpacket) % 63); 375 break; 376 } 377 } 378 379 static inline unsigned long buffer_offset(void *buf) 380 { 381 return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1); 382 } 383 384 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb) 385 { 386 u8 *buf = urb->transfer_buffer; 387 u8 *guard = buf - buffer_offset(buf); 388 unsigned i; 389 390 for (i = 0; guard < buf; i++, guard++) { 391 if (*guard != GUARD_BYTE) { 392 ERROR(tdev, "guard byte[%d] %d (not %d)\n", 393 i, *guard, GUARD_BYTE); 394 return -EINVAL; 395 } 396 } 397 return 0; 398 } 399 400 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb) 401 { 402 unsigned i; 403 u8 expected; 404 u8 *buf = urb->transfer_buffer; 405 unsigned len = urb->actual_length; 406 unsigned maxpacket = get_maxpacket(urb->dev, urb->pipe); 407 408 int ret = check_guard_bytes(tdev, urb); 409 if (ret) 410 return ret; 411 412 for (i = 0; i < len; i++, buf++) { 413 switch (pattern) { 414 /* all-zeroes has no synchronization issues */ 415 case 0: 416 expected = 0; 417 break; 418 /* mod63 stays in sync with short-terminated transfers, 419 * or otherwise when host and gadget agree on how large 420 * each usb transfer request should be. resync is done 421 * with set_interface or set_config. 422 */ 423 case 1: /* mod63 */ 424 expected = (i % maxpacket) % 63; 425 break; 426 /* always fail unsupported patterns */ 427 default: 428 expected = !*buf; 429 break; 430 } 431 if (*buf == expected) 432 continue; 433 ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected); 434 return -EINVAL; 435 } 436 return 0; 437 } 438 439 static void simple_free_urb(struct urb *urb) 440 { 441 unsigned long offset = buffer_offset(urb->transfer_buffer); 442 443 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 444 usb_free_coherent( 445 urb->dev, 446 urb->transfer_buffer_length + offset, 447 urb->transfer_buffer - offset, 448 urb->transfer_dma - offset); 449 else 450 kfree(urb->transfer_buffer - offset); 451 usb_free_urb(urb); 452 } 453 454 static int simple_io( 455 struct usbtest_dev *tdev, 456 struct urb *urb, 457 int iterations, 458 int vary, 459 int expected, 460 const char *label 461 ) 462 { 463 struct usb_device *udev = urb->dev; 464 int max = urb->transfer_buffer_length; 465 struct completion completion; 466 int retval = 0; 467 unsigned long expire; 468 469 urb->context = &completion; 470 while (retval == 0 && iterations-- > 0) { 471 init_completion(&completion); 472 if (usb_pipeout(urb->pipe)) { 473 simple_fill_buf(urb); 474 urb->transfer_flags |= URB_ZERO_PACKET; 475 } 476 retval = usb_submit_urb(urb, GFP_KERNEL); 477 if (retval != 0) 478 break; 479 480 expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT); 481 if (!wait_for_completion_timeout(&completion, expire)) { 482 usb_kill_urb(urb); 483 retval = (urb->status == -ENOENT ? 484 -ETIMEDOUT : urb->status); 485 } else { 486 retval = urb->status; 487 } 488 489 urb->dev = udev; 490 if (retval == 0 && usb_pipein(urb->pipe)) 491 retval = simple_check_buf(tdev, urb); 492 493 if (vary) { 494 int len = urb->transfer_buffer_length; 495 496 len += vary; 497 len %= max; 498 if (len == 0) 499 len = (vary < max) ? vary : max; 500 urb->transfer_buffer_length = len; 501 } 502 503 /* FIXME if endpoint halted, clear halt (and log) */ 504 } 505 urb->transfer_buffer_length = max; 506 507 if (expected != retval) 508 dev_err(&udev->dev, 509 "%s failed, iterations left %d, status %d (not %d)\n", 510 label, iterations, retval, expected); 511 return retval; 512 } 513 514 515 /*-------------------------------------------------------------------------*/ 516 517 /* We use scatterlist primitives to test queued I/O. 518 * Yes, this also tests the scatterlist primitives. 519 */ 520 521 static void free_sglist(struct scatterlist *sg, int nents) 522 { 523 unsigned i; 524 525 if (!sg) 526 return; 527 for (i = 0; i < nents; i++) { 528 if (!sg_page(&sg[i])) 529 continue; 530 kfree(sg_virt(&sg[i])); 531 } 532 kfree(sg); 533 } 534 535 static struct scatterlist * 536 alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe) 537 { 538 struct scatterlist *sg; 539 unsigned int n_size = 0; 540 unsigned i; 541 unsigned size = max; 542 unsigned maxpacket = 543 get_maxpacket(interface_to_usbdev(dev->intf), pipe); 544 545 if (max == 0) 546 return NULL; 547 548 sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL); 549 if (!sg) 550 return NULL; 551 sg_init_table(sg, nents); 552 553 for (i = 0; i < nents; i++) { 554 char *buf; 555 unsigned j; 556 557 buf = kzalloc(size, GFP_KERNEL); 558 if (!buf) { 559 free_sglist(sg, i); 560 return NULL; 561 } 562 563 /* kmalloc pages are always physically contiguous! */ 564 sg_set_buf(&sg[i], buf, size); 565 566 switch (pattern) { 567 case 0: 568 /* already zeroed */ 569 break; 570 case 1: 571 for (j = 0; j < size; j++) 572 *buf++ = (u8) (((j + n_size) % maxpacket) % 63); 573 n_size += size; 574 break; 575 } 576 577 if (vary) { 578 size += vary; 579 size %= max; 580 if (size == 0) 581 size = (vary < max) ? vary : max; 582 } 583 } 584 585 return sg; 586 } 587 588 struct sg_timeout { 589 struct timer_list timer; 590 struct usb_sg_request *req; 591 }; 592 593 static void sg_timeout(struct timer_list *t) 594 { 595 struct sg_timeout *timeout = from_timer(timeout, t, timer); 596 597 usb_sg_cancel(timeout->req); 598 } 599 600 static int perform_sglist( 601 struct usbtest_dev *tdev, 602 unsigned iterations, 603 int pipe, 604 struct usb_sg_request *req, 605 struct scatterlist *sg, 606 int nents 607 ) 608 { 609 struct usb_device *udev = testdev_to_usbdev(tdev); 610 int retval = 0; 611 struct sg_timeout timeout = { 612 .req = req, 613 }; 614 615 timer_setup_on_stack(&timeout.timer, sg_timeout, 0); 616 617 while (retval == 0 && iterations-- > 0) { 618 retval = usb_sg_init(req, udev, pipe, 619 (udev->speed == USB_SPEED_HIGH) 620 ? (INTERRUPT_RATE << 3) 621 : INTERRUPT_RATE, 622 sg, nents, 0, GFP_KERNEL); 623 624 if (retval) 625 break; 626 mod_timer(&timeout.timer, jiffies + 627 msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); 628 usb_sg_wait(req); 629 if (!del_timer_sync(&timeout.timer)) 630 retval = -ETIMEDOUT; 631 else 632 retval = req->status; 633 destroy_timer_on_stack(&timeout.timer); 634 635 /* FIXME check resulting data pattern */ 636 637 /* FIXME if endpoint halted, clear halt (and log) */ 638 } 639 640 /* FIXME for unlink or fault handling tests, don't report 641 * failure if retval is as we expected ... 642 */ 643 if (retval) 644 ERROR(tdev, "perform_sglist failed, " 645 "iterations left %d, status %d\n", 646 iterations, retval); 647 return retval; 648 } 649 650 651 /*-------------------------------------------------------------------------*/ 652 653 /* unqueued control message testing 654 * 655 * there's a nice set of device functional requirements in chapter 9 of the 656 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use 657 * special test firmware. 658 * 659 * we know the device is configured (or suspended) by the time it's visible 660 * through usbfs. we can't change that, so we won't test enumeration (which 661 * worked 'well enough' to get here, this time), power management (ditto), 662 * or remote wakeup (which needs human interaction). 663 */ 664 665 static unsigned realworld = 1; 666 module_param(realworld, uint, 0); 667 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance"); 668 669 static int get_altsetting(struct usbtest_dev *dev) 670 { 671 struct usb_interface *iface = dev->intf; 672 struct usb_device *udev = interface_to_usbdev(iface); 673 int retval; 674 675 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 676 USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE, 677 0, iface->altsetting[0].desc.bInterfaceNumber, 678 dev->buf, 1, USB_CTRL_GET_TIMEOUT); 679 switch (retval) { 680 case 1: 681 return dev->buf[0]; 682 case 0: 683 retval = -ERANGE; 684 fallthrough; 685 default: 686 return retval; 687 } 688 } 689 690 static int set_altsetting(struct usbtest_dev *dev, int alternate) 691 { 692 struct usb_interface *iface = dev->intf; 693 struct usb_device *udev; 694 695 if (alternate < 0 || alternate >= 256) 696 return -EINVAL; 697 698 udev = interface_to_usbdev(iface); 699 return usb_set_interface(udev, 700 iface->altsetting[0].desc.bInterfaceNumber, 701 alternate); 702 } 703 704 static int is_good_config(struct usbtest_dev *tdev, int len) 705 { 706 struct usb_config_descriptor *config; 707 708 if (len < (int)sizeof(*config)) 709 return 0; 710 config = (struct usb_config_descriptor *) tdev->buf; 711 712 switch (config->bDescriptorType) { 713 case USB_DT_CONFIG: 714 case USB_DT_OTHER_SPEED_CONFIG: 715 if (config->bLength != 9) { 716 ERROR(tdev, "bogus config descriptor length\n"); 717 return 0; 718 } 719 /* this bit 'must be 1' but often isn't */ 720 if (!realworld && !(config->bmAttributes & 0x80)) { 721 ERROR(tdev, "high bit of config attributes not set\n"); 722 return 0; 723 } 724 if (config->bmAttributes & 0x1f) { /* reserved == 0 */ 725 ERROR(tdev, "reserved config bits set\n"); 726 return 0; 727 } 728 break; 729 default: 730 return 0; 731 } 732 733 if (le16_to_cpu(config->wTotalLength) == len) /* read it all */ 734 return 1; 735 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */ 736 return 1; 737 ERROR(tdev, "bogus config descriptor read size\n"); 738 return 0; 739 } 740 741 static int is_good_ext(struct usbtest_dev *tdev, u8 *buf) 742 { 743 struct usb_ext_cap_descriptor *ext; 744 u32 attr; 745 746 ext = (struct usb_ext_cap_descriptor *) buf; 747 748 if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) { 749 ERROR(tdev, "bogus usb 2.0 extension descriptor length\n"); 750 return 0; 751 } 752 753 attr = le32_to_cpu(ext->bmAttributes); 754 /* bits[1:15] is used and others are reserved */ 755 if (attr & ~0xfffe) { /* reserved == 0 */ 756 ERROR(tdev, "reserved bits set\n"); 757 return 0; 758 } 759 760 return 1; 761 } 762 763 static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf) 764 { 765 struct usb_ss_cap_descriptor *ss; 766 767 ss = (struct usb_ss_cap_descriptor *) buf; 768 769 if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) { 770 ERROR(tdev, "bogus superspeed device capability descriptor length\n"); 771 return 0; 772 } 773 774 /* 775 * only bit[1] of bmAttributes is used for LTM and others are 776 * reserved 777 */ 778 if (ss->bmAttributes & ~0x02) { /* reserved == 0 */ 779 ERROR(tdev, "reserved bits set in bmAttributes\n"); 780 return 0; 781 } 782 783 /* bits[0:3] of wSpeedSupported is used and others are reserved */ 784 if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */ 785 ERROR(tdev, "reserved bits set in wSpeedSupported\n"); 786 return 0; 787 } 788 789 return 1; 790 } 791 792 static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf) 793 { 794 struct usb_ss_container_id_descriptor *con_id; 795 796 con_id = (struct usb_ss_container_id_descriptor *) buf; 797 798 if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) { 799 ERROR(tdev, "bogus container id descriptor length\n"); 800 return 0; 801 } 802 803 if (con_id->bReserved) { /* reserved == 0 */ 804 ERROR(tdev, "reserved bits set\n"); 805 return 0; 806 } 807 808 return 1; 809 } 810 811 /* sanity test for standard requests working with usb_control_mesg() and some 812 * of the utility functions which use it. 813 * 814 * this doesn't test how endpoint halts behave or data toggles get set, since 815 * we won't do I/O to bulk/interrupt endpoints here (which is how to change 816 * halt or toggle). toggle testing is impractical without support from hcds. 817 * 818 * this avoids failing devices linux would normally work with, by not testing 819 * config/altsetting operations for devices that only support their defaults. 820 * such devices rarely support those needless operations. 821 * 822 * NOTE that since this is a sanity test, it's not examining boundary cases 823 * to see if usbcore, hcd, and device all behave right. such testing would 824 * involve varied read sizes and other operation sequences. 825 */ 826 static int ch9_postconfig(struct usbtest_dev *dev) 827 { 828 struct usb_interface *iface = dev->intf; 829 struct usb_device *udev = interface_to_usbdev(iface); 830 int i, alt, retval; 831 832 /* [9.2.3] if there's more than one altsetting, we need to be able to 833 * set and get each one. mostly trusts the descriptors from usbcore. 834 */ 835 for (i = 0; i < iface->num_altsetting; i++) { 836 837 /* 9.2.3 constrains the range here */ 838 alt = iface->altsetting[i].desc.bAlternateSetting; 839 if (alt < 0 || alt >= iface->num_altsetting) { 840 dev_err(&iface->dev, 841 "invalid alt [%d].bAltSetting = %d\n", 842 i, alt); 843 } 844 845 /* [real world] get/set unimplemented if there's only one */ 846 if (realworld && iface->num_altsetting == 1) 847 continue; 848 849 /* [9.4.10] set_interface */ 850 retval = set_altsetting(dev, alt); 851 if (retval) { 852 dev_err(&iface->dev, "can't set_interface = %d, %d\n", 853 alt, retval); 854 return retval; 855 } 856 857 /* [9.4.4] get_interface always works */ 858 retval = get_altsetting(dev); 859 if (retval != alt) { 860 dev_err(&iface->dev, "get alt should be %d, was %d\n", 861 alt, retval); 862 return (retval < 0) ? retval : -EDOM; 863 } 864 865 } 866 867 /* [real world] get_config unimplemented if there's only one */ 868 if (!realworld || udev->descriptor.bNumConfigurations != 1) { 869 int expected = udev->actconfig->desc.bConfigurationValue; 870 871 /* [9.4.2] get_configuration always works 872 * ... although some cheap devices (like one TI Hub I've got) 873 * won't return config descriptors except before set_config. 874 */ 875 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 876 USB_REQ_GET_CONFIGURATION, 877 USB_DIR_IN | USB_RECIP_DEVICE, 878 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT); 879 if (retval != 1 || dev->buf[0] != expected) { 880 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n", 881 retval, dev->buf[0], expected); 882 return (retval < 0) ? retval : -EDOM; 883 } 884 } 885 886 /* there's always [9.4.3] a device descriptor [9.6.1] */ 887 retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0, 888 dev->buf, sizeof(udev->descriptor)); 889 if (retval != sizeof(udev->descriptor)) { 890 dev_err(&iface->dev, "dev descriptor --> %d\n", retval); 891 return (retval < 0) ? retval : -EDOM; 892 } 893 894 /* 895 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB 896 * 3.0 spec 897 */ 898 if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) { 899 struct usb_bos_descriptor *bos = NULL; 900 struct usb_dev_cap_header *header = NULL; 901 unsigned total, num, length; 902 u8 *buf; 903 904 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf, 905 sizeof(*udev->bos->desc)); 906 if (retval != sizeof(*udev->bos->desc)) { 907 dev_err(&iface->dev, "bos descriptor --> %d\n", retval); 908 return (retval < 0) ? retval : -EDOM; 909 } 910 911 bos = (struct usb_bos_descriptor *)dev->buf; 912 total = le16_to_cpu(bos->wTotalLength); 913 num = bos->bNumDeviceCaps; 914 915 if (total > TBUF_SIZE) 916 total = TBUF_SIZE; 917 918 /* 919 * get generic device-level capability descriptors [9.6.2] 920 * in USB 3.0 spec 921 */ 922 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf, 923 total); 924 if (retval != total) { 925 dev_err(&iface->dev, "bos descriptor set --> %d\n", 926 retval); 927 return (retval < 0) ? retval : -EDOM; 928 } 929 930 length = sizeof(*udev->bos->desc); 931 buf = dev->buf; 932 for (i = 0; i < num; i++) { 933 buf += length; 934 if (buf + sizeof(struct usb_dev_cap_header) > 935 dev->buf + total) 936 break; 937 938 header = (struct usb_dev_cap_header *)buf; 939 length = header->bLength; 940 941 if (header->bDescriptorType != 942 USB_DT_DEVICE_CAPABILITY) { 943 dev_warn(&udev->dev, "not device capability descriptor, skip\n"); 944 continue; 945 } 946 947 switch (header->bDevCapabilityType) { 948 case USB_CAP_TYPE_EXT: 949 if (buf + USB_DT_USB_EXT_CAP_SIZE > 950 dev->buf + total || 951 !is_good_ext(dev, buf)) { 952 dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n"); 953 return -EDOM; 954 } 955 break; 956 case USB_SS_CAP_TYPE: 957 if (buf + USB_DT_USB_SS_CAP_SIZE > 958 dev->buf + total || 959 !is_good_ss_cap(dev, buf)) { 960 dev_err(&iface->dev, "bogus superspeed device capability descriptor\n"); 961 return -EDOM; 962 } 963 break; 964 case CONTAINER_ID_TYPE: 965 if (buf + USB_DT_USB_SS_CONTN_ID_SIZE > 966 dev->buf + total || 967 !is_good_con_id(dev, buf)) { 968 dev_err(&iface->dev, "bogus container id descriptor\n"); 969 return -EDOM; 970 } 971 break; 972 default: 973 break; 974 } 975 } 976 } 977 978 /* there's always [9.4.3] at least one config descriptor [9.6.3] */ 979 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) { 980 retval = usb_get_descriptor(udev, USB_DT_CONFIG, i, 981 dev->buf, TBUF_SIZE); 982 if (!is_good_config(dev, retval)) { 983 dev_err(&iface->dev, 984 "config [%d] descriptor --> %d\n", 985 i, retval); 986 return (retval < 0) ? retval : -EDOM; 987 } 988 989 /* FIXME cross-checking udev->config[i] to make sure usbcore 990 * parsed it right (etc) would be good testing paranoia 991 */ 992 } 993 994 /* and sometimes [9.2.6.6] speed dependent descriptors */ 995 if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) { 996 struct usb_qualifier_descriptor *d = NULL; 997 998 /* device qualifier [9.6.2] */ 999 retval = usb_get_descriptor(udev, 1000 USB_DT_DEVICE_QUALIFIER, 0, dev->buf, 1001 sizeof(struct usb_qualifier_descriptor)); 1002 if (retval == -EPIPE) { 1003 if (udev->speed == USB_SPEED_HIGH) { 1004 dev_err(&iface->dev, 1005 "hs dev qualifier --> %d\n", 1006 retval); 1007 return retval; 1008 } 1009 /* usb2.0 but not high-speed capable; fine */ 1010 } else if (retval != sizeof(struct usb_qualifier_descriptor)) { 1011 dev_err(&iface->dev, "dev qualifier --> %d\n", retval); 1012 return (retval < 0) ? retval : -EDOM; 1013 } else 1014 d = (struct usb_qualifier_descriptor *) dev->buf; 1015 1016 /* might not have [9.6.2] any other-speed configs [9.6.4] */ 1017 if (d) { 1018 unsigned max = d->bNumConfigurations; 1019 for (i = 0; i < max; i++) { 1020 retval = usb_get_descriptor(udev, 1021 USB_DT_OTHER_SPEED_CONFIG, i, 1022 dev->buf, TBUF_SIZE); 1023 if (!is_good_config(dev, retval)) { 1024 dev_err(&iface->dev, 1025 "other speed config --> %d\n", 1026 retval); 1027 return (retval < 0) ? retval : -EDOM; 1028 } 1029 } 1030 } 1031 } 1032 /* FIXME fetch strings from at least the device descriptor */ 1033 1034 /* [9.4.5] get_status always works */ 1035 retval = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, dev->buf); 1036 if (retval) { 1037 dev_err(&iface->dev, "get dev status --> %d\n", retval); 1038 return retval; 1039 } 1040 1041 /* FIXME configuration.bmAttributes says if we could try to set/clear 1042 * the device's remote wakeup feature ... if we can, test that here 1043 */ 1044 1045 retval = usb_get_std_status(udev, USB_RECIP_INTERFACE, 1046 iface->altsetting[0].desc.bInterfaceNumber, dev->buf); 1047 if (retval) { 1048 dev_err(&iface->dev, "get interface status --> %d\n", retval); 1049 return retval; 1050 } 1051 /* FIXME get status for each endpoint in the interface */ 1052 1053 return 0; 1054 } 1055 1056 /*-------------------------------------------------------------------------*/ 1057 1058 /* use ch9 requests to test whether: 1059 * (a) queues work for control, keeping N subtests queued and 1060 * active (auto-resubmit) for M loops through the queue. 1061 * (b) protocol stalls (control-only) will autorecover. 1062 * it's not like bulk/intr; no halt clearing. 1063 * (c) short control reads are reported and handled. 1064 * (d) queues are always processed in-order 1065 */ 1066 1067 struct ctrl_ctx { 1068 spinlock_t lock; 1069 struct usbtest_dev *dev; 1070 struct completion complete; 1071 unsigned count; 1072 unsigned pending; 1073 int status; 1074 struct urb **urb; 1075 struct usbtest_param_32 *param; 1076 int last; 1077 }; 1078 1079 #define NUM_SUBCASES 16 /* how many test subcases here? */ 1080 1081 struct subcase { 1082 struct usb_ctrlrequest setup; 1083 int number; 1084 int expected; 1085 }; 1086 1087 static void ctrl_complete(struct urb *urb) 1088 { 1089 struct ctrl_ctx *ctx = urb->context; 1090 struct usb_ctrlrequest *reqp; 1091 struct subcase *subcase; 1092 int status = urb->status; 1093 unsigned long flags; 1094 1095 reqp = (struct usb_ctrlrequest *)urb->setup_packet; 1096 subcase = container_of(reqp, struct subcase, setup); 1097 1098 spin_lock_irqsave(&ctx->lock, flags); 1099 ctx->count--; 1100 ctx->pending--; 1101 1102 /* queue must transfer and complete in fifo order, unless 1103 * usb_unlink_urb() is used to unlink something not at the 1104 * physical queue head (not tested). 1105 */ 1106 if (subcase->number > 0) { 1107 if ((subcase->number - ctx->last) != 1) { 1108 ERROR(ctx->dev, 1109 "subcase %d completed out of order, last %d\n", 1110 subcase->number, ctx->last); 1111 status = -EDOM; 1112 ctx->last = subcase->number; 1113 goto error; 1114 } 1115 } 1116 ctx->last = subcase->number; 1117 1118 /* succeed or fault in only one way? */ 1119 if (status == subcase->expected) 1120 status = 0; 1121 1122 /* async unlink for cleanup? */ 1123 else if (status != -ECONNRESET) { 1124 1125 /* some faults are allowed, not required */ 1126 if (subcase->expected > 0 && ( 1127 ((status == -subcase->expected /* happened */ 1128 || status == 0)))) /* didn't */ 1129 status = 0; 1130 /* sometimes more than one fault is allowed */ 1131 else if (subcase->number == 12 && status == -EPIPE) 1132 status = 0; 1133 else 1134 ERROR(ctx->dev, "subtest %d error, status %d\n", 1135 subcase->number, status); 1136 } 1137 1138 /* unexpected status codes mean errors; ideally, in hardware */ 1139 if (status) { 1140 error: 1141 if (ctx->status == 0) { 1142 int i; 1143 1144 ctx->status = status; 1145 ERROR(ctx->dev, "control queue %02x.%02x, err %d, " 1146 "%d left, subcase %d, len %d/%d\n", 1147 reqp->bRequestType, reqp->bRequest, 1148 status, ctx->count, subcase->number, 1149 urb->actual_length, 1150 urb->transfer_buffer_length); 1151 1152 /* FIXME this "unlink everything" exit route should 1153 * be a separate test case. 1154 */ 1155 1156 /* unlink whatever's still pending */ 1157 for (i = 1; i < ctx->param->sglen; i++) { 1158 struct urb *u = ctx->urb[ 1159 (i + subcase->number) 1160 % ctx->param->sglen]; 1161 1162 if (u == urb || !u->dev) 1163 continue; 1164 spin_unlock(&ctx->lock); 1165 status = usb_unlink_urb(u); 1166 spin_lock(&ctx->lock); 1167 switch (status) { 1168 case -EINPROGRESS: 1169 case -EBUSY: 1170 case -EIDRM: 1171 continue; 1172 default: 1173 ERROR(ctx->dev, "urb unlink --> %d\n", 1174 status); 1175 } 1176 } 1177 status = ctx->status; 1178 } 1179 } 1180 1181 /* resubmit if we need to, else mark this as done */ 1182 if ((status == 0) && (ctx->pending < ctx->count)) { 1183 status = usb_submit_urb(urb, GFP_ATOMIC); 1184 if (status != 0) { 1185 ERROR(ctx->dev, 1186 "can't resubmit ctrl %02x.%02x, err %d\n", 1187 reqp->bRequestType, reqp->bRequest, status); 1188 urb->dev = NULL; 1189 } else 1190 ctx->pending++; 1191 } else 1192 urb->dev = NULL; 1193 1194 /* signal completion when nothing's queued */ 1195 if (ctx->pending == 0) 1196 complete(&ctx->complete); 1197 spin_unlock_irqrestore(&ctx->lock, flags); 1198 } 1199 1200 static int 1201 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param) 1202 { 1203 struct usb_device *udev = testdev_to_usbdev(dev); 1204 struct urb **urb; 1205 struct ctrl_ctx context; 1206 int i; 1207 1208 if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen) 1209 return -EOPNOTSUPP; 1210 1211 spin_lock_init(&context.lock); 1212 context.dev = dev; 1213 init_completion(&context.complete); 1214 context.count = param->sglen * param->iterations; 1215 context.pending = 0; 1216 context.status = -ENOMEM; 1217 context.param = param; 1218 context.last = -1; 1219 1220 /* allocate and init the urbs we'll queue. 1221 * as with bulk/intr sglists, sglen is the queue depth; it also 1222 * controls which subtests run (more tests than sglen) or rerun. 1223 */ 1224 urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL); 1225 if (!urb) 1226 return -ENOMEM; 1227 for (i = 0; i < param->sglen; i++) { 1228 int pipe = usb_rcvctrlpipe(udev, 0); 1229 unsigned len; 1230 struct urb *u; 1231 struct usb_ctrlrequest req; 1232 struct subcase *reqp; 1233 1234 /* sign of this variable means: 1235 * -: tested code must return this (negative) error code 1236 * +: tested code may return this (negative too) error code 1237 */ 1238 int expected = 0; 1239 1240 /* requests here are mostly expected to succeed on any 1241 * device, but some are chosen to trigger protocol stalls 1242 * or short reads. 1243 */ 1244 memset(&req, 0, sizeof(req)); 1245 req.bRequest = USB_REQ_GET_DESCRIPTOR; 1246 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE; 1247 1248 switch (i % NUM_SUBCASES) { 1249 case 0: /* get device descriptor */ 1250 req.wValue = cpu_to_le16(USB_DT_DEVICE << 8); 1251 len = sizeof(struct usb_device_descriptor); 1252 break; 1253 case 1: /* get first config descriptor (only) */ 1254 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1255 len = sizeof(struct usb_config_descriptor); 1256 break; 1257 case 2: /* get altsetting (OFTEN STALLS) */ 1258 req.bRequest = USB_REQ_GET_INTERFACE; 1259 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; 1260 /* index = 0 means first interface */ 1261 len = 1; 1262 expected = EPIPE; 1263 break; 1264 case 3: /* get interface status */ 1265 req.bRequest = USB_REQ_GET_STATUS; 1266 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE; 1267 /* interface 0 */ 1268 len = 2; 1269 break; 1270 case 4: /* get device status */ 1271 req.bRequest = USB_REQ_GET_STATUS; 1272 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE; 1273 len = 2; 1274 break; 1275 case 5: /* get device qualifier (MAY STALL) */ 1276 req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8); 1277 len = sizeof(struct usb_qualifier_descriptor); 1278 if (udev->speed != USB_SPEED_HIGH) 1279 expected = EPIPE; 1280 break; 1281 case 6: /* get first config descriptor, plus interface */ 1282 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1283 len = sizeof(struct usb_config_descriptor); 1284 len += sizeof(struct usb_interface_descriptor); 1285 break; 1286 case 7: /* get interface descriptor (ALWAYS STALLS) */ 1287 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8); 1288 /* interface == 0 */ 1289 len = sizeof(struct usb_interface_descriptor); 1290 expected = -EPIPE; 1291 break; 1292 /* NOTE: two consecutive stalls in the queue here. 1293 * that tests fault recovery a bit more aggressively. */ 1294 case 8: /* clear endpoint halt (MAY STALL) */ 1295 req.bRequest = USB_REQ_CLEAR_FEATURE; 1296 req.bRequestType = USB_RECIP_ENDPOINT; 1297 /* wValue 0 == ep halt */ 1298 /* wIndex 0 == ep0 (shouldn't halt!) */ 1299 len = 0; 1300 pipe = usb_sndctrlpipe(udev, 0); 1301 expected = EPIPE; 1302 break; 1303 case 9: /* get endpoint status */ 1304 req.bRequest = USB_REQ_GET_STATUS; 1305 req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT; 1306 /* endpoint 0 */ 1307 len = 2; 1308 break; 1309 case 10: /* trigger short read (EREMOTEIO) */ 1310 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1311 len = 1024; 1312 expected = -EREMOTEIO; 1313 break; 1314 /* NOTE: two consecutive _different_ faults in the queue. */ 1315 case 11: /* get endpoint descriptor (ALWAYS STALLS) */ 1316 req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8); 1317 /* endpoint == 0 */ 1318 len = sizeof(struct usb_interface_descriptor); 1319 expected = EPIPE; 1320 break; 1321 /* NOTE: sometimes even a third fault in the queue! */ 1322 case 12: /* get string 0 descriptor (MAY STALL) */ 1323 req.wValue = cpu_to_le16(USB_DT_STRING << 8); 1324 /* string == 0, for language IDs */ 1325 len = sizeof(struct usb_interface_descriptor); 1326 /* may succeed when > 4 languages */ 1327 expected = EREMOTEIO; /* or EPIPE, if no strings */ 1328 break; 1329 case 13: /* short read, resembling case 10 */ 1330 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); 1331 /* last data packet "should" be DATA1, not DATA0 */ 1332 if (udev->speed == USB_SPEED_SUPER) 1333 len = 1024 - 512; 1334 else 1335 len = 1024 - udev->descriptor.bMaxPacketSize0; 1336 expected = -EREMOTEIO; 1337 break; 1338 case 14: /* short read; try to fill the last packet */ 1339 req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0); 1340 /* device descriptor size == 18 bytes */ 1341 len = udev->descriptor.bMaxPacketSize0; 1342 if (udev->speed == USB_SPEED_SUPER) 1343 len = 512; 1344 switch (len) { 1345 case 8: 1346 len = 24; 1347 break; 1348 case 16: 1349 len = 32; 1350 break; 1351 } 1352 expected = -EREMOTEIO; 1353 break; 1354 case 15: 1355 req.wValue = cpu_to_le16(USB_DT_BOS << 8); 1356 if (udev->bos) 1357 len = le16_to_cpu(udev->bos->desc->wTotalLength); 1358 else 1359 len = sizeof(struct usb_bos_descriptor); 1360 if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201) 1361 expected = -EPIPE; 1362 break; 1363 default: 1364 ERROR(dev, "bogus number of ctrl queue testcases!\n"); 1365 context.status = -EINVAL; 1366 goto cleanup; 1367 } 1368 req.wLength = cpu_to_le16(len); 1369 urb[i] = u = simple_alloc_urb(udev, pipe, len, 0); 1370 if (!u) 1371 goto cleanup; 1372 1373 reqp = kmalloc(sizeof(*reqp), GFP_KERNEL); 1374 if (!reqp) 1375 goto cleanup; 1376 reqp->setup = req; 1377 reqp->number = i % NUM_SUBCASES; 1378 reqp->expected = expected; 1379 u->setup_packet = (char *) &reqp->setup; 1380 1381 u->context = &context; 1382 u->complete = ctrl_complete; 1383 } 1384 1385 /* queue the urbs */ 1386 context.urb = urb; 1387 spin_lock_irq(&context.lock); 1388 for (i = 0; i < param->sglen; i++) { 1389 context.status = usb_submit_urb(urb[i], GFP_ATOMIC); 1390 if (context.status != 0) { 1391 ERROR(dev, "can't submit urb[%d], status %d\n", 1392 i, context.status); 1393 context.count = context.pending; 1394 break; 1395 } 1396 context.pending++; 1397 } 1398 spin_unlock_irq(&context.lock); 1399 1400 /* FIXME set timer and time out; provide a disconnect hook */ 1401 1402 /* wait for the last one to complete */ 1403 if (context.pending > 0) 1404 wait_for_completion(&context.complete); 1405 1406 cleanup: 1407 for (i = 0; i < param->sglen; i++) { 1408 if (!urb[i]) 1409 continue; 1410 urb[i]->dev = udev; 1411 kfree(urb[i]->setup_packet); 1412 simple_free_urb(urb[i]); 1413 } 1414 kfree(urb); 1415 return context.status; 1416 } 1417 #undef NUM_SUBCASES 1418 1419 1420 /*-------------------------------------------------------------------------*/ 1421 1422 static void unlink1_callback(struct urb *urb) 1423 { 1424 int status = urb->status; 1425 1426 /* we "know" -EPIPE (stall) never happens */ 1427 if (!status) 1428 status = usb_submit_urb(urb, GFP_ATOMIC); 1429 if (status) { 1430 urb->status = status; 1431 complete(urb->context); 1432 } 1433 } 1434 1435 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async) 1436 { 1437 struct urb *urb; 1438 struct completion completion; 1439 int retval = 0; 1440 1441 init_completion(&completion); 1442 urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0); 1443 if (!urb) 1444 return -ENOMEM; 1445 urb->context = &completion; 1446 urb->complete = unlink1_callback; 1447 1448 if (usb_pipeout(urb->pipe)) { 1449 simple_fill_buf(urb); 1450 urb->transfer_flags |= URB_ZERO_PACKET; 1451 } 1452 1453 /* keep the endpoint busy. there are lots of hc/hcd-internal 1454 * states, and testing should get to all of them over time. 1455 * 1456 * FIXME want additional tests for when endpoint is STALLing 1457 * due to errors, or is just NAKing requests. 1458 */ 1459 retval = usb_submit_urb(urb, GFP_KERNEL); 1460 if (retval != 0) { 1461 dev_err(&dev->intf->dev, "submit fail %d\n", retval); 1462 return retval; 1463 } 1464 1465 /* unlinking that should always work. variable delay tests more 1466 * hcd states and code paths, even with little other system load. 1467 */ 1468 msleep(jiffies % (2 * INTERRUPT_RATE)); 1469 if (async) { 1470 while (!completion_done(&completion)) { 1471 retval = usb_unlink_urb(urb); 1472 1473 if (retval == 0 && usb_pipein(urb->pipe)) 1474 retval = simple_check_buf(dev, urb); 1475 1476 switch (retval) { 1477 case -EBUSY: 1478 case -EIDRM: 1479 /* we can't unlink urbs while they're completing 1480 * or if they've completed, and we haven't 1481 * resubmitted. "normal" drivers would prevent 1482 * resubmission, but since we're testing unlink 1483 * paths, we can't. 1484 */ 1485 ERROR(dev, "unlink retry\n"); 1486 continue; 1487 case 0: 1488 case -EINPROGRESS: 1489 break; 1490 1491 default: 1492 dev_err(&dev->intf->dev, 1493 "unlink fail %d\n", retval); 1494 return retval; 1495 } 1496 1497 break; 1498 } 1499 } else 1500 usb_kill_urb(urb); 1501 1502 wait_for_completion(&completion); 1503 retval = urb->status; 1504 simple_free_urb(urb); 1505 1506 if (async) 1507 return (retval == -ECONNRESET) ? 0 : retval - 1000; 1508 else 1509 return (retval == -ENOENT || retval == -EPERM) ? 1510 0 : retval - 2000; 1511 } 1512 1513 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len) 1514 { 1515 int retval = 0; 1516 1517 /* test sync and async paths */ 1518 retval = unlink1(dev, pipe, len, 1); 1519 if (!retval) 1520 retval = unlink1(dev, pipe, len, 0); 1521 return retval; 1522 } 1523 1524 /*-------------------------------------------------------------------------*/ 1525 1526 struct queued_ctx { 1527 struct completion complete; 1528 atomic_t pending; 1529 unsigned num; 1530 int status; 1531 struct urb **urbs; 1532 }; 1533 1534 static void unlink_queued_callback(struct urb *urb) 1535 { 1536 int status = urb->status; 1537 struct queued_ctx *ctx = urb->context; 1538 1539 if (ctx->status) 1540 goto done; 1541 if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) { 1542 if (status == -ECONNRESET) 1543 goto done; 1544 /* What error should we report if the URB completed normally? */ 1545 } 1546 if (status != 0) 1547 ctx->status = status; 1548 1549 done: 1550 if (atomic_dec_and_test(&ctx->pending)) 1551 complete(&ctx->complete); 1552 } 1553 1554 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num, 1555 unsigned size) 1556 { 1557 struct queued_ctx ctx; 1558 struct usb_device *udev = testdev_to_usbdev(dev); 1559 void *buf; 1560 dma_addr_t buf_dma; 1561 int i; 1562 int retval = -ENOMEM; 1563 1564 init_completion(&ctx.complete); 1565 atomic_set(&ctx.pending, 1); /* One more than the actual value */ 1566 ctx.num = num; 1567 ctx.status = 0; 1568 1569 buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma); 1570 if (!buf) 1571 return retval; 1572 memset(buf, 0, size); 1573 1574 /* Allocate and init the urbs we'll queue */ 1575 ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL); 1576 if (!ctx.urbs) 1577 goto free_buf; 1578 for (i = 0; i < num; i++) { 1579 ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL); 1580 if (!ctx.urbs[i]) 1581 goto free_urbs; 1582 usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size, 1583 unlink_queued_callback, &ctx); 1584 ctx.urbs[i]->transfer_dma = buf_dma; 1585 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; 1586 1587 if (usb_pipeout(ctx.urbs[i]->pipe)) { 1588 simple_fill_buf(ctx.urbs[i]); 1589 ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET; 1590 } 1591 } 1592 1593 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */ 1594 for (i = 0; i < num; i++) { 1595 atomic_inc(&ctx.pending); 1596 retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL); 1597 if (retval != 0) { 1598 dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n", 1599 i, retval); 1600 atomic_dec(&ctx.pending); 1601 ctx.status = retval; 1602 break; 1603 } 1604 } 1605 if (i == num) { 1606 usb_unlink_urb(ctx.urbs[num - 4]); 1607 usb_unlink_urb(ctx.urbs[num - 2]); 1608 } else { 1609 while (--i >= 0) 1610 usb_unlink_urb(ctx.urbs[i]); 1611 } 1612 1613 if (atomic_dec_and_test(&ctx.pending)) /* The extra count */ 1614 complete(&ctx.complete); 1615 wait_for_completion(&ctx.complete); 1616 retval = ctx.status; 1617 1618 free_urbs: 1619 for (i = 0; i < num; i++) 1620 usb_free_urb(ctx.urbs[i]); 1621 kfree(ctx.urbs); 1622 free_buf: 1623 usb_free_coherent(udev, size, buf, buf_dma); 1624 return retval; 1625 } 1626 1627 /*-------------------------------------------------------------------------*/ 1628 1629 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb) 1630 { 1631 int retval; 1632 u16 status; 1633 1634 /* shouldn't look or act halted */ 1635 retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1636 if (retval < 0) { 1637 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n", 1638 ep, retval); 1639 return retval; 1640 } 1641 if (status != 0) { 1642 ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status); 1643 return -EINVAL; 1644 } 1645 retval = simple_io(tdev, urb, 1, 0, 0, __func__); 1646 if (retval != 0) 1647 return -EINVAL; 1648 return 0; 1649 } 1650 1651 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb) 1652 { 1653 int retval; 1654 u16 status; 1655 1656 /* should look and act halted */ 1657 retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1658 if (retval < 0) { 1659 ERROR(tdev, "ep %02x couldn't get halt status, %d\n", 1660 ep, retval); 1661 return retval; 1662 } 1663 if (status != 1) { 1664 ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status); 1665 return -EINVAL; 1666 } 1667 retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__); 1668 if (retval != -EPIPE) 1669 return -EINVAL; 1670 retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted"); 1671 if (retval != -EPIPE) 1672 return -EINVAL; 1673 return 0; 1674 } 1675 1676 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb) 1677 { 1678 int retval; 1679 1680 /* shouldn't look or act halted now */ 1681 retval = verify_not_halted(tdev, ep, urb); 1682 if (retval < 0) 1683 return retval; 1684 1685 /* set halt (protocol test only), verify it worked */ 1686 retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0), 1687 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT, 1688 USB_ENDPOINT_HALT, ep, 1689 NULL, 0, USB_CTRL_SET_TIMEOUT); 1690 if (retval < 0) { 1691 ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval); 1692 return retval; 1693 } 1694 retval = verify_halted(tdev, ep, urb); 1695 if (retval < 0) { 1696 int ret; 1697 1698 /* clear halt anyways, else further tests will fail */ 1699 ret = usb_clear_halt(urb->dev, urb->pipe); 1700 if (ret) 1701 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", 1702 ep, ret); 1703 1704 return retval; 1705 } 1706 1707 /* clear halt (tests API + protocol), verify it worked */ 1708 retval = usb_clear_halt(urb->dev, urb->pipe); 1709 if (retval < 0) { 1710 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); 1711 return retval; 1712 } 1713 retval = verify_not_halted(tdev, ep, urb); 1714 if (retval < 0) 1715 return retval; 1716 1717 /* NOTE: could also verify SET_INTERFACE clear halts ... */ 1718 1719 return 0; 1720 } 1721 1722 static int test_toggle_sync(struct usbtest_dev *tdev, int ep, struct urb *urb) 1723 { 1724 int retval; 1725 1726 /* clear initial data toggle to DATA0 */ 1727 retval = usb_clear_halt(urb->dev, urb->pipe); 1728 if (retval < 0) { 1729 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); 1730 return retval; 1731 } 1732 1733 /* transfer 3 data packets, should be DATA0, DATA1, DATA0 */ 1734 retval = simple_io(tdev, urb, 1, 0, 0, __func__); 1735 if (retval != 0) 1736 return -EINVAL; 1737 1738 /* clear halt resets device side data toggle, host should react to it */ 1739 retval = usb_clear_halt(urb->dev, urb->pipe); 1740 if (retval < 0) { 1741 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval); 1742 return retval; 1743 } 1744 1745 /* host should use DATA0 again after clear halt */ 1746 retval = simple_io(tdev, urb, 1, 0, 0, __func__); 1747 1748 return retval; 1749 } 1750 1751 static int halt_simple(struct usbtest_dev *dev) 1752 { 1753 int ep; 1754 int retval = 0; 1755 struct urb *urb; 1756 struct usb_device *udev = testdev_to_usbdev(dev); 1757 1758 if (udev->speed == USB_SPEED_SUPER) 1759 urb = simple_alloc_urb(udev, 0, 1024, 0); 1760 else 1761 urb = simple_alloc_urb(udev, 0, 512, 0); 1762 if (urb == NULL) 1763 return -ENOMEM; 1764 1765 if (dev->in_pipe) { 1766 ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN; 1767 urb->pipe = dev->in_pipe; 1768 retval = test_halt(dev, ep, urb); 1769 if (retval < 0) 1770 goto done; 1771 } 1772 1773 if (dev->out_pipe) { 1774 ep = usb_pipeendpoint(dev->out_pipe); 1775 urb->pipe = dev->out_pipe; 1776 retval = test_halt(dev, ep, urb); 1777 } 1778 done: 1779 simple_free_urb(urb); 1780 return retval; 1781 } 1782 1783 static int toggle_sync_simple(struct usbtest_dev *dev) 1784 { 1785 int ep; 1786 int retval = 0; 1787 struct urb *urb; 1788 struct usb_device *udev = testdev_to_usbdev(dev); 1789 unsigned maxp = get_maxpacket(udev, dev->out_pipe); 1790 1791 /* 1792 * Create a URB that causes a transfer of uneven amount of data packets 1793 * This way the clear toggle has an impact on the data toggle sequence. 1794 * Use 2 maxpacket length packets and one zero packet. 1795 */ 1796 urb = simple_alloc_urb(udev, 0, 2 * maxp, 0); 1797 if (urb == NULL) 1798 return -ENOMEM; 1799 1800 urb->transfer_flags |= URB_ZERO_PACKET; 1801 1802 ep = usb_pipeendpoint(dev->out_pipe); 1803 urb->pipe = dev->out_pipe; 1804 retval = test_toggle_sync(dev, ep, urb); 1805 1806 simple_free_urb(urb); 1807 return retval; 1808 } 1809 1810 /*-------------------------------------------------------------------------*/ 1811 1812 /* Control OUT tests use the vendor control requests from Intel's 1813 * USB 2.0 compliance test device: write a buffer, read it back. 1814 * 1815 * Intel's spec only _requires_ that it work for one packet, which 1816 * is pretty weak. Some HCDs place limits here; most devices will 1817 * need to be able to handle more than one OUT data packet. We'll 1818 * try whatever we're told to try. 1819 */ 1820 static int ctrl_out(struct usbtest_dev *dev, 1821 unsigned count, unsigned length, unsigned vary, unsigned offset) 1822 { 1823 unsigned i, j, len; 1824 int retval; 1825 u8 *buf; 1826 char *what = "?"; 1827 struct usb_device *udev; 1828 1829 if (length < 1 || length > 0xffff || vary >= length) 1830 return -EINVAL; 1831 1832 buf = kmalloc(length + offset, GFP_KERNEL); 1833 if (!buf) 1834 return -ENOMEM; 1835 1836 buf += offset; 1837 udev = testdev_to_usbdev(dev); 1838 len = length; 1839 retval = 0; 1840 1841 /* NOTE: hardware might well act differently if we pushed it 1842 * with lots back-to-back queued requests. 1843 */ 1844 for (i = 0; i < count; i++) { 1845 /* write patterned data */ 1846 for (j = 0; j < len; j++) 1847 buf[j] = (u8)(i + j); 1848 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 1849 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR, 1850 0, 0, buf, len, USB_CTRL_SET_TIMEOUT); 1851 if (retval != len) { 1852 what = "write"; 1853 if (retval >= 0) { 1854 ERROR(dev, "ctrl_out, wlen %d (expected %d)\n", 1855 retval, len); 1856 retval = -EBADMSG; 1857 } 1858 break; 1859 } 1860 1861 /* read it back -- assuming nothing intervened!! */ 1862 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 1863 0x5c, USB_DIR_IN|USB_TYPE_VENDOR, 1864 0, 0, buf, len, USB_CTRL_GET_TIMEOUT); 1865 if (retval != len) { 1866 what = "read"; 1867 if (retval >= 0) { 1868 ERROR(dev, "ctrl_out, rlen %d (expected %d)\n", 1869 retval, len); 1870 retval = -EBADMSG; 1871 } 1872 break; 1873 } 1874 1875 /* fail if we can't verify */ 1876 for (j = 0; j < len; j++) { 1877 if (buf[j] != (u8)(i + j)) { 1878 ERROR(dev, "ctrl_out, byte %d is %d not %d\n", 1879 j, buf[j], (u8)(i + j)); 1880 retval = -EBADMSG; 1881 break; 1882 } 1883 } 1884 if (retval < 0) { 1885 what = "verify"; 1886 break; 1887 } 1888 1889 len += vary; 1890 1891 /* [real world] the "zero bytes IN" case isn't really used. 1892 * hardware can easily trip up in this weird case, since its 1893 * status stage is IN, not OUT like other ep0in transfers. 1894 */ 1895 if (len > length) 1896 len = realworld ? 1 : 0; 1897 } 1898 1899 if (retval < 0) 1900 ERROR(dev, "ctrl_out %s failed, code %d, count %d\n", 1901 what, retval, i); 1902 1903 kfree(buf - offset); 1904 return retval; 1905 } 1906 1907 /*-------------------------------------------------------------------------*/ 1908 1909 /* ISO/BULK tests ... mimics common usage 1910 * - buffer length is split into N packets (mostly maxpacket sized) 1911 * - multi-buffers according to sglen 1912 */ 1913 1914 struct transfer_context { 1915 unsigned count; 1916 unsigned pending; 1917 spinlock_t lock; 1918 struct completion done; 1919 int submit_error; 1920 unsigned long errors; 1921 unsigned long packet_count; 1922 struct usbtest_dev *dev; 1923 bool is_iso; 1924 }; 1925 1926 static void complicated_callback(struct urb *urb) 1927 { 1928 struct transfer_context *ctx = urb->context; 1929 unsigned long flags; 1930 1931 spin_lock_irqsave(&ctx->lock, flags); 1932 ctx->count--; 1933 1934 ctx->packet_count += urb->number_of_packets; 1935 if (urb->error_count > 0) 1936 ctx->errors += urb->error_count; 1937 else if (urb->status != 0) 1938 ctx->errors += (ctx->is_iso ? urb->number_of_packets : 1); 1939 else if (urb->actual_length != urb->transfer_buffer_length) 1940 ctx->errors++; 1941 else if (check_guard_bytes(ctx->dev, urb) != 0) 1942 ctx->errors++; 1943 1944 if (urb->status == 0 && ctx->count > (ctx->pending - 1) 1945 && !ctx->submit_error) { 1946 int status = usb_submit_urb(urb, GFP_ATOMIC); 1947 switch (status) { 1948 case 0: 1949 goto done; 1950 default: 1951 dev_err(&ctx->dev->intf->dev, 1952 "resubmit err %d\n", 1953 status); 1954 fallthrough; 1955 case -ENODEV: /* disconnected */ 1956 case -ESHUTDOWN: /* endpoint disabled */ 1957 ctx->submit_error = 1; 1958 break; 1959 } 1960 } 1961 1962 ctx->pending--; 1963 if (ctx->pending == 0) { 1964 if (ctx->errors) 1965 dev_err(&ctx->dev->intf->dev, 1966 "during the test, %lu errors out of %lu\n", 1967 ctx->errors, ctx->packet_count); 1968 complete(&ctx->done); 1969 } 1970 done: 1971 spin_unlock_irqrestore(&ctx->lock, flags); 1972 } 1973 1974 static struct urb *iso_alloc_urb( 1975 struct usb_device *udev, 1976 int pipe, 1977 struct usb_endpoint_descriptor *desc, 1978 long bytes, 1979 unsigned offset 1980 ) 1981 { 1982 struct urb *urb; 1983 unsigned i, maxp, packets; 1984 1985 if (bytes < 0 || !desc) 1986 return NULL; 1987 1988 maxp = usb_endpoint_maxp(desc); 1989 if (udev->speed >= USB_SPEED_SUPER) 1990 maxp *= ss_isoc_get_packet_num(udev, pipe); 1991 else 1992 maxp *= usb_endpoint_maxp_mult(desc); 1993 1994 packets = DIV_ROUND_UP(bytes, maxp); 1995 1996 urb = usb_alloc_urb(packets, GFP_KERNEL); 1997 if (!urb) 1998 return urb; 1999 urb->dev = udev; 2000 urb->pipe = pipe; 2001 2002 urb->number_of_packets = packets; 2003 urb->transfer_buffer_length = bytes; 2004 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset, 2005 GFP_KERNEL, 2006 &urb->transfer_dma); 2007 if (!urb->transfer_buffer) { 2008 usb_free_urb(urb); 2009 return NULL; 2010 } 2011 if (offset) { 2012 memset(urb->transfer_buffer, GUARD_BYTE, offset); 2013 urb->transfer_buffer += offset; 2014 urb->transfer_dma += offset; 2015 } 2016 /* For inbound transfers use guard byte so that test fails if 2017 data not correctly copied */ 2018 memset(urb->transfer_buffer, 2019 usb_pipein(urb->pipe) ? GUARD_BYTE : 0, 2020 bytes); 2021 2022 for (i = 0; i < packets; i++) { 2023 /* here, only the last packet will be short */ 2024 urb->iso_frame_desc[i].length = min_t(unsigned int, 2025 bytes, maxp); 2026 bytes -= urb->iso_frame_desc[i].length; 2027 2028 urb->iso_frame_desc[i].offset = maxp * i; 2029 } 2030 2031 urb->complete = complicated_callback; 2032 /* urb->context = SET BY CALLER */ 2033 urb->interval = 1 << (desc->bInterval - 1); 2034 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; 2035 return urb; 2036 } 2037 2038 static int 2039 test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, 2040 int pipe, struct usb_endpoint_descriptor *desc, unsigned offset) 2041 { 2042 struct transfer_context context; 2043 struct usb_device *udev; 2044 unsigned i; 2045 unsigned long packets = 0; 2046 int status = 0; 2047 struct urb **urbs; 2048 2049 if (!param->sglen || param->iterations > UINT_MAX / param->sglen) 2050 return -EINVAL; 2051 2052 if (param->sglen > MAX_SGLEN) 2053 return -EINVAL; 2054 2055 urbs = kcalloc(param->sglen, sizeof(*urbs), GFP_KERNEL); 2056 if (!urbs) 2057 return -ENOMEM; 2058 2059 memset(&context, 0, sizeof(context)); 2060 context.count = param->iterations * param->sglen; 2061 context.dev = dev; 2062 context.is_iso = !!desc; 2063 init_completion(&context.done); 2064 spin_lock_init(&context.lock); 2065 2066 udev = testdev_to_usbdev(dev); 2067 2068 for (i = 0; i < param->sglen; i++) { 2069 if (context.is_iso) 2070 urbs[i] = iso_alloc_urb(udev, pipe, desc, 2071 param->length, offset); 2072 else 2073 urbs[i] = complicated_alloc_urb(udev, pipe, 2074 param->length, 0); 2075 2076 if (!urbs[i]) { 2077 status = -ENOMEM; 2078 goto fail; 2079 } 2080 packets += urbs[i]->number_of_packets; 2081 urbs[i]->context = &context; 2082 } 2083 packets *= param->iterations; 2084 2085 if (context.is_iso) { 2086 int transaction_num; 2087 2088 if (udev->speed >= USB_SPEED_SUPER) 2089 transaction_num = ss_isoc_get_packet_num(udev, pipe); 2090 else 2091 transaction_num = usb_endpoint_maxp_mult(desc); 2092 2093 dev_info(&dev->intf->dev, 2094 "iso period %d %sframes, wMaxPacket %d, transactions: %d\n", 2095 1 << (desc->bInterval - 1), 2096 (udev->speed >= USB_SPEED_HIGH) ? "micro" : "", 2097 usb_endpoint_maxp(desc), 2098 transaction_num); 2099 2100 dev_info(&dev->intf->dev, 2101 "total %lu msec (%lu packets)\n", 2102 (packets * (1 << (desc->bInterval - 1))) 2103 / ((udev->speed >= USB_SPEED_HIGH) ? 8 : 1), 2104 packets); 2105 } 2106 2107 spin_lock_irq(&context.lock); 2108 for (i = 0; i < param->sglen; i++) { 2109 ++context.pending; 2110 status = usb_submit_urb(urbs[i], GFP_ATOMIC); 2111 if (status < 0) { 2112 ERROR(dev, "submit iso[%d], error %d\n", i, status); 2113 if (i == 0) { 2114 spin_unlock_irq(&context.lock); 2115 goto fail; 2116 } 2117 2118 simple_free_urb(urbs[i]); 2119 urbs[i] = NULL; 2120 context.pending--; 2121 context.submit_error = 1; 2122 break; 2123 } 2124 } 2125 spin_unlock_irq(&context.lock); 2126 2127 wait_for_completion(&context.done); 2128 2129 for (i = 0; i < param->sglen; i++) { 2130 if (urbs[i]) 2131 simple_free_urb(urbs[i]); 2132 } 2133 /* 2134 * Isochronous transfers are expected to fail sometimes. As an 2135 * arbitrary limit, we will report an error if any submissions 2136 * fail or if the transfer failure rate is > 10%. 2137 */ 2138 if (status != 0) 2139 ; 2140 else if (context.submit_error) 2141 status = -EACCES; 2142 else if (context.errors > 2143 (context.is_iso ? context.packet_count / 10 : 0)) 2144 status = -EIO; 2145 2146 kfree(urbs); 2147 return status; 2148 2149 fail: 2150 for (i = 0; i < param->sglen; i++) { 2151 if (urbs[i]) 2152 simple_free_urb(urbs[i]); 2153 } 2154 2155 kfree(urbs); 2156 return status; 2157 } 2158 2159 static int test_unaligned_bulk( 2160 struct usbtest_dev *tdev, 2161 int pipe, 2162 unsigned length, 2163 int iterations, 2164 unsigned transfer_flags, 2165 const char *label) 2166 { 2167 int retval; 2168 struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev), 2169 pipe, length, transfer_flags, 1, 0, simple_callback); 2170 2171 if (!urb) 2172 return -ENOMEM; 2173 2174 retval = simple_io(tdev, urb, iterations, 0, 0, label); 2175 simple_free_urb(urb); 2176 return retval; 2177 } 2178 2179 /* Run tests. */ 2180 static int 2181 usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param) 2182 { 2183 struct usbtest_dev *dev = usb_get_intfdata(intf); 2184 struct usb_device *udev = testdev_to_usbdev(dev); 2185 struct urb *urb; 2186 struct scatterlist *sg; 2187 struct usb_sg_request req; 2188 unsigned i; 2189 int retval = -EOPNOTSUPP; 2190 2191 if (param->iterations <= 0) 2192 return -EINVAL; 2193 if (param->sglen > MAX_SGLEN) 2194 return -EINVAL; 2195 /* 2196 * Just a bunch of test cases that every HCD is expected to handle. 2197 * 2198 * Some may need specific firmware, though it'd be good to have 2199 * one firmware image to handle all the test cases. 2200 * 2201 * FIXME add more tests! cancel requests, verify the data, control 2202 * queueing, concurrent read+write threads, and so on. 2203 */ 2204 switch (param->test_num) { 2205 2206 case 0: 2207 dev_info(&intf->dev, "TEST 0: NOP\n"); 2208 retval = 0; 2209 break; 2210 2211 /* Simple non-queued bulk I/O tests */ 2212 case 1: 2213 if (dev->out_pipe == 0) 2214 break; 2215 dev_info(&intf->dev, 2216 "TEST 1: write %d bytes %u times\n", 2217 param->length, param->iterations); 2218 urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0); 2219 if (!urb) { 2220 retval = -ENOMEM; 2221 break; 2222 } 2223 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2224 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1"); 2225 simple_free_urb(urb); 2226 break; 2227 case 2: 2228 if (dev->in_pipe == 0) 2229 break; 2230 dev_info(&intf->dev, 2231 "TEST 2: read %d bytes %u times\n", 2232 param->length, param->iterations); 2233 urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0); 2234 if (!urb) { 2235 retval = -ENOMEM; 2236 break; 2237 } 2238 /* FIRMWARE: bulk source (maybe generates short writes) */ 2239 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2"); 2240 simple_free_urb(urb); 2241 break; 2242 case 3: 2243 if (dev->out_pipe == 0 || param->vary == 0) 2244 break; 2245 dev_info(&intf->dev, 2246 "TEST 3: write/%d 0..%d bytes %u times\n", 2247 param->vary, param->length, param->iterations); 2248 urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0); 2249 if (!urb) { 2250 retval = -ENOMEM; 2251 break; 2252 } 2253 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2254 retval = simple_io(dev, urb, param->iterations, param->vary, 2255 0, "test3"); 2256 simple_free_urb(urb); 2257 break; 2258 case 4: 2259 if (dev->in_pipe == 0 || param->vary == 0) 2260 break; 2261 dev_info(&intf->dev, 2262 "TEST 4: read/%d 0..%d bytes %u times\n", 2263 param->vary, param->length, param->iterations); 2264 urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0); 2265 if (!urb) { 2266 retval = -ENOMEM; 2267 break; 2268 } 2269 /* FIRMWARE: bulk source (maybe generates short writes) */ 2270 retval = simple_io(dev, urb, param->iterations, param->vary, 2271 0, "test4"); 2272 simple_free_urb(urb); 2273 break; 2274 2275 /* Queued bulk I/O tests */ 2276 case 5: 2277 if (dev->out_pipe == 0 || param->sglen == 0) 2278 break; 2279 dev_info(&intf->dev, 2280 "TEST 5: write %d sglists %d entries of %d bytes\n", 2281 param->iterations, 2282 param->sglen, param->length); 2283 sg = alloc_sglist(param->sglen, param->length, 2284 0, dev, dev->out_pipe); 2285 if (!sg) { 2286 retval = -ENOMEM; 2287 break; 2288 } 2289 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2290 retval = perform_sglist(dev, param->iterations, dev->out_pipe, 2291 &req, sg, param->sglen); 2292 free_sglist(sg, param->sglen); 2293 break; 2294 2295 case 6: 2296 if (dev->in_pipe == 0 || param->sglen == 0) 2297 break; 2298 dev_info(&intf->dev, 2299 "TEST 6: read %d sglists %d entries of %d bytes\n", 2300 param->iterations, 2301 param->sglen, param->length); 2302 sg = alloc_sglist(param->sglen, param->length, 2303 0, dev, dev->in_pipe); 2304 if (!sg) { 2305 retval = -ENOMEM; 2306 break; 2307 } 2308 /* FIRMWARE: bulk source (maybe generates short writes) */ 2309 retval = perform_sglist(dev, param->iterations, dev->in_pipe, 2310 &req, sg, param->sglen); 2311 free_sglist(sg, param->sglen); 2312 break; 2313 case 7: 2314 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0) 2315 break; 2316 dev_info(&intf->dev, 2317 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n", 2318 param->vary, param->iterations, 2319 param->sglen, param->length); 2320 sg = alloc_sglist(param->sglen, param->length, 2321 param->vary, dev, dev->out_pipe); 2322 if (!sg) { 2323 retval = -ENOMEM; 2324 break; 2325 } 2326 /* FIRMWARE: bulk sink (maybe accepts short writes) */ 2327 retval = perform_sglist(dev, param->iterations, dev->out_pipe, 2328 &req, sg, param->sglen); 2329 free_sglist(sg, param->sglen); 2330 break; 2331 case 8: 2332 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0) 2333 break; 2334 dev_info(&intf->dev, 2335 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n", 2336 param->vary, param->iterations, 2337 param->sglen, param->length); 2338 sg = alloc_sglist(param->sglen, param->length, 2339 param->vary, dev, dev->in_pipe); 2340 if (!sg) { 2341 retval = -ENOMEM; 2342 break; 2343 } 2344 /* FIRMWARE: bulk source (maybe generates short writes) */ 2345 retval = perform_sglist(dev, param->iterations, dev->in_pipe, 2346 &req, sg, param->sglen); 2347 free_sglist(sg, param->sglen); 2348 break; 2349 2350 /* non-queued sanity tests for control (chapter 9 subset) */ 2351 case 9: 2352 retval = 0; 2353 dev_info(&intf->dev, 2354 "TEST 9: ch9 (subset) control tests, %d times\n", 2355 param->iterations); 2356 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2357 retval = ch9_postconfig(dev); 2358 if (retval) 2359 dev_err(&intf->dev, "ch9 subset failed, " 2360 "iterations left %d\n", i); 2361 break; 2362 2363 /* queued control messaging */ 2364 case 10: 2365 retval = 0; 2366 dev_info(&intf->dev, 2367 "TEST 10: queue %d control calls, %d times\n", 2368 param->sglen, 2369 param->iterations); 2370 retval = test_ctrl_queue(dev, param); 2371 break; 2372 2373 /* simple non-queued unlinks (ring with one urb) */ 2374 case 11: 2375 if (dev->in_pipe == 0 || !param->length) 2376 break; 2377 retval = 0; 2378 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n", 2379 param->iterations, param->length); 2380 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2381 retval = unlink_simple(dev, dev->in_pipe, 2382 param->length); 2383 if (retval) 2384 dev_err(&intf->dev, "unlink reads failed %d, " 2385 "iterations left %d\n", retval, i); 2386 break; 2387 case 12: 2388 if (dev->out_pipe == 0 || !param->length) 2389 break; 2390 retval = 0; 2391 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n", 2392 param->iterations, param->length); 2393 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2394 retval = unlink_simple(dev, dev->out_pipe, 2395 param->length); 2396 if (retval) 2397 dev_err(&intf->dev, "unlink writes failed %d, " 2398 "iterations left %d\n", retval, i); 2399 break; 2400 2401 /* ep halt tests */ 2402 case 13: 2403 if (dev->out_pipe == 0 && dev->in_pipe == 0) 2404 break; 2405 retval = 0; 2406 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n", 2407 param->iterations); 2408 for (i = param->iterations; retval == 0 && i--; /* NOP */) 2409 retval = halt_simple(dev); 2410 2411 if (retval) 2412 ERROR(dev, "halts failed, iterations left %d\n", i); 2413 break; 2414 2415 /* control write tests */ 2416 case 14: 2417 if (!dev->info->ctrl_out) 2418 break; 2419 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n", 2420 param->iterations, 2421 realworld ? 1 : 0, param->length, 2422 param->vary); 2423 retval = ctrl_out(dev, param->iterations, 2424 param->length, param->vary, 0); 2425 break; 2426 2427 /* iso write tests */ 2428 case 15: 2429 if (dev->out_iso_pipe == 0 || param->sglen == 0) 2430 break; 2431 dev_info(&intf->dev, 2432 "TEST 15: write %d iso, %d entries of %d bytes\n", 2433 param->iterations, 2434 param->sglen, param->length); 2435 /* FIRMWARE: iso sink */ 2436 retval = test_queue(dev, param, 2437 dev->out_iso_pipe, dev->iso_out, 0); 2438 break; 2439 2440 /* iso read tests */ 2441 case 16: 2442 if (dev->in_iso_pipe == 0 || param->sglen == 0) 2443 break; 2444 dev_info(&intf->dev, 2445 "TEST 16: read %d iso, %d entries of %d bytes\n", 2446 param->iterations, 2447 param->sglen, param->length); 2448 /* FIRMWARE: iso source */ 2449 retval = test_queue(dev, param, 2450 dev->in_iso_pipe, dev->iso_in, 0); 2451 break; 2452 2453 /* FIXME scatterlist cancel (needs helper thread) */ 2454 2455 /* Tests for bulk I/O using DMA mapping by core and odd address */ 2456 case 17: 2457 if (dev->out_pipe == 0) 2458 break; 2459 dev_info(&intf->dev, 2460 "TEST 17: write odd addr %d bytes %u times core map\n", 2461 param->length, param->iterations); 2462 2463 retval = test_unaligned_bulk( 2464 dev, dev->out_pipe, 2465 param->length, param->iterations, 2466 0, "test17"); 2467 break; 2468 2469 case 18: 2470 if (dev->in_pipe == 0) 2471 break; 2472 dev_info(&intf->dev, 2473 "TEST 18: read odd addr %d bytes %u times core map\n", 2474 param->length, param->iterations); 2475 2476 retval = test_unaligned_bulk( 2477 dev, dev->in_pipe, 2478 param->length, param->iterations, 2479 0, "test18"); 2480 break; 2481 2482 /* Tests for bulk I/O using premapped coherent buffer and odd address */ 2483 case 19: 2484 if (dev->out_pipe == 0) 2485 break; 2486 dev_info(&intf->dev, 2487 "TEST 19: write odd addr %d bytes %u times premapped\n", 2488 param->length, param->iterations); 2489 2490 retval = test_unaligned_bulk( 2491 dev, dev->out_pipe, 2492 param->length, param->iterations, 2493 URB_NO_TRANSFER_DMA_MAP, "test19"); 2494 break; 2495 2496 case 20: 2497 if (dev->in_pipe == 0) 2498 break; 2499 dev_info(&intf->dev, 2500 "TEST 20: read odd addr %d bytes %u times premapped\n", 2501 param->length, param->iterations); 2502 2503 retval = test_unaligned_bulk( 2504 dev, dev->in_pipe, 2505 param->length, param->iterations, 2506 URB_NO_TRANSFER_DMA_MAP, "test20"); 2507 break; 2508 2509 /* control write tests with unaligned buffer */ 2510 case 21: 2511 if (!dev->info->ctrl_out) 2512 break; 2513 dev_info(&intf->dev, 2514 "TEST 21: %d ep0out odd addr, %d..%d vary %d\n", 2515 param->iterations, 2516 realworld ? 1 : 0, param->length, 2517 param->vary); 2518 retval = ctrl_out(dev, param->iterations, 2519 param->length, param->vary, 1); 2520 break; 2521 2522 /* unaligned iso tests */ 2523 case 22: 2524 if (dev->out_iso_pipe == 0 || param->sglen == 0) 2525 break; 2526 dev_info(&intf->dev, 2527 "TEST 22: write %d iso odd, %d entries of %d bytes\n", 2528 param->iterations, 2529 param->sglen, param->length); 2530 retval = test_queue(dev, param, 2531 dev->out_iso_pipe, dev->iso_out, 1); 2532 break; 2533 2534 case 23: 2535 if (dev->in_iso_pipe == 0 || param->sglen == 0) 2536 break; 2537 dev_info(&intf->dev, 2538 "TEST 23: read %d iso odd, %d entries of %d bytes\n", 2539 param->iterations, 2540 param->sglen, param->length); 2541 retval = test_queue(dev, param, 2542 dev->in_iso_pipe, dev->iso_in, 1); 2543 break; 2544 2545 /* unlink URBs from a bulk-OUT queue */ 2546 case 24: 2547 if (dev->out_pipe == 0 || !param->length || param->sglen < 4) 2548 break; 2549 retval = 0; 2550 dev_info(&intf->dev, "TEST 24: unlink from %d queues of " 2551 "%d %d-byte writes\n", 2552 param->iterations, param->sglen, param->length); 2553 for (i = param->iterations; retval == 0 && i > 0; --i) { 2554 retval = unlink_queued(dev, dev->out_pipe, 2555 param->sglen, param->length); 2556 if (retval) { 2557 dev_err(&intf->dev, 2558 "unlink queued writes failed %d, " 2559 "iterations left %d\n", retval, i); 2560 break; 2561 } 2562 } 2563 break; 2564 2565 /* Simple non-queued interrupt I/O tests */ 2566 case 25: 2567 if (dev->out_int_pipe == 0) 2568 break; 2569 dev_info(&intf->dev, 2570 "TEST 25: write %d bytes %u times\n", 2571 param->length, param->iterations); 2572 urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length, 2573 dev->int_out->bInterval); 2574 if (!urb) { 2575 retval = -ENOMEM; 2576 break; 2577 } 2578 /* FIRMWARE: interrupt sink (maybe accepts short writes) */ 2579 retval = simple_io(dev, urb, param->iterations, 0, 0, "test25"); 2580 simple_free_urb(urb); 2581 break; 2582 case 26: 2583 if (dev->in_int_pipe == 0) 2584 break; 2585 dev_info(&intf->dev, 2586 "TEST 26: read %d bytes %u times\n", 2587 param->length, param->iterations); 2588 urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length, 2589 dev->int_in->bInterval); 2590 if (!urb) { 2591 retval = -ENOMEM; 2592 break; 2593 } 2594 /* FIRMWARE: interrupt source (maybe generates short writes) */ 2595 retval = simple_io(dev, urb, param->iterations, 0, 0, "test26"); 2596 simple_free_urb(urb); 2597 break; 2598 case 27: 2599 /* We do performance test, so ignore data compare */ 2600 if (dev->out_pipe == 0 || param->sglen == 0 || pattern != 0) 2601 break; 2602 dev_info(&intf->dev, 2603 "TEST 27: bulk write %dMbytes\n", (param->iterations * 2604 param->sglen * param->length) / (1024 * 1024)); 2605 retval = test_queue(dev, param, 2606 dev->out_pipe, NULL, 0); 2607 break; 2608 case 28: 2609 if (dev->in_pipe == 0 || param->sglen == 0 || pattern != 0) 2610 break; 2611 dev_info(&intf->dev, 2612 "TEST 28: bulk read %dMbytes\n", (param->iterations * 2613 param->sglen * param->length) / (1024 * 1024)); 2614 retval = test_queue(dev, param, 2615 dev->in_pipe, NULL, 0); 2616 break; 2617 /* Test data Toggle/seq_nr clear between bulk out transfers */ 2618 case 29: 2619 if (dev->out_pipe == 0) 2620 break; 2621 retval = 0; 2622 dev_info(&intf->dev, "TEST 29: Clear toggle between bulk writes %d times\n", 2623 param->iterations); 2624 for (i = param->iterations; retval == 0 && i > 0; --i) 2625 retval = toggle_sync_simple(dev); 2626 2627 if (retval) 2628 ERROR(dev, "toggle sync failed, iterations left %d\n", 2629 i); 2630 break; 2631 } 2632 return retval; 2633 } 2634 2635 /*-------------------------------------------------------------------------*/ 2636 2637 /* We only have this one interface to user space, through usbfs. 2638 * User mode code can scan usbfs to find N different devices (maybe on 2639 * different busses) to use when testing, and allocate one thread per 2640 * test. So discovery is simplified, and we have no device naming issues. 2641 * 2642 * Don't use these only as stress/load tests. Use them along with 2643 * other USB bus activity: plugging, unplugging, mousing, mp3 playback, 2644 * video capture, and so on. Run different tests at different times, in 2645 * different sequences. Nothing here should interact with other devices, 2646 * except indirectly by consuming USB bandwidth and CPU resources for test 2647 * threads and request completion. But the only way to know that for sure 2648 * is to test when HC queues are in use by many devices. 2649 * 2650 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(), 2651 * it locks out usbcore in certain code paths. Notably, if you disconnect 2652 * the device-under-test, hub_wq will wait block forever waiting for the 2653 * ioctl to complete ... so that usb_disconnect() can abort the pending 2654 * urbs and then call usbtest_disconnect(). To abort a test, you're best 2655 * off just killing the userspace task and waiting for it to exit. 2656 */ 2657 2658 static int 2659 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) 2660 { 2661 2662 struct usbtest_dev *dev = usb_get_intfdata(intf); 2663 struct usbtest_param_64 *param_64 = buf; 2664 struct usbtest_param_32 temp; 2665 struct usbtest_param_32 *param_32 = buf; 2666 struct timespec64 start; 2667 struct timespec64 end; 2668 struct timespec64 duration; 2669 int retval = -EOPNOTSUPP; 2670 2671 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */ 2672 2673 pattern = mod_pattern; 2674 2675 if (mutex_lock_interruptible(&dev->lock)) 2676 return -ERESTARTSYS; 2677 2678 /* FIXME: What if a system sleep starts while a test is running? */ 2679 2680 /* some devices, like ez-usb default devices, need a non-default 2681 * altsetting to have any active endpoints. some tests change 2682 * altsettings; force a default so most tests don't need to check. 2683 */ 2684 if (dev->info->alt >= 0) { 2685 if (intf->altsetting->desc.bInterfaceNumber) { 2686 retval = -ENODEV; 2687 goto free_mutex; 2688 } 2689 retval = set_altsetting(dev, dev->info->alt); 2690 if (retval) { 2691 dev_err(&intf->dev, 2692 "set altsetting to %d failed, %d\n", 2693 dev->info->alt, retval); 2694 goto free_mutex; 2695 } 2696 } 2697 2698 switch (code) { 2699 case USBTEST_REQUEST_64: 2700 temp.test_num = param_64->test_num; 2701 temp.iterations = param_64->iterations; 2702 temp.length = param_64->length; 2703 temp.sglen = param_64->sglen; 2704 temp.vary = param_64->vary; 2705 param_32 = &temp; 2706 break; 2707 2708 case USBTEST_REQUEST_32: 2709 break; 2710 2711 default: 2712 retval = -EOPNOTSUPP; 2713 goto free_mutex; 2714 } 2715 2716 ktime_get_ts64(&start); 2717 2718 retval = usbtest_do_ioctl(intf, param_32); 2719 if (retval < 0) 2720 goto free_mutex; 2721 2722 ktime_get_ts64(&end); 2723 2724 duration = timespec64_sub(end, start); 2725 2726 temp.duration_sec = duration.tv_sec; 2727 temp.duration_usec = duration.tv_nsec/NSEC_PER_USEC; 2728 2729 switch (code) { 2730 case USBTEST_REQUEST_32: 2731 param_32->duration_sec = temp.duration_sec; 2732 param_32->duration_usec = temp.duration_usec; 2733 break; 2734 2735 case USBTEST_REQUEST_64: 2736 param_64->duration_sec = temp.duration_sec; 2737 param_64->duration_usec = temp.duration_usec; 2738 break; 2739 } 2740 2741 free_mutex: 2742 mutex_unlock(&dev->lock); 2743 return retval; 2744 } 2745 2746 /*-------------------------------------------------------------------------*/ 2747 2748 static unsigned force_interrupt; 2749 module_param(force_interrupt, uint, 0); 2750 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt"); 2751 2752 #ifdef GENERIC 2753 static unsigned short vendor; 2754 module_param(vendor, ushort, 0); 2755 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)"); 2756 2757 static unsigned short product; 2758 module_param(product, ushort, 0); 2759 MODULE_PARM_DESC(product, "product code (from vendor)"); 2760 #endif 2761 2762 static int 2763 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id) 2764 { 2765 struct usb_device *udev; 2766 struct usbtest_dev *dev; 2767 struct usbtest_info *info; 2768 char *rtest, *wtest; 2769 char *irtest, *iwtest; 2770 char *intrtest, *intwtest; 2771 2772 udev = interface_to_usbdev(intf); 2773 2774 #ifdef GENERIC 2775 /* specify devices by module parameters? */ 2776 if (id->match_flags == 0) { 2777 /* vendor match required, product match optional */ 2778 if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor) 2779 return -ENODEV; 2780 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product) 2781 return -ENODEV; 2782 dev_info(&intf->dev, "matched module params, " 2783 "vend=0x%04x prod=0x%04x\n", 2784 le16_to_cpu(udev->descriptor.idVendor), 2785 le16_to_cpu(udev->descriptor.idProduct)); 2786 } 2787 #endif 2788 2789 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2790 if (!dev) 2791 return -ENOMEM; 2792 info = (struct usbtest_info *) id->driver_info; 2793 dev->info = info; 2794 mutex_init(&dev->lock); 2795 2796 dev->intf = intf; 2797 2798 /* cacheline-aligned scratch for i/o */ 2799 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL); 2800 if (dev->buf == NULL) { 2801 kfree(dev); 2802 return -ENOMEM; 2803 } 2804 2805 /* NOTE this doesn't yet test the handful of difference that are 2806 * visible with high speed interrupts: bigger maxpacket (1K) and 2807 * "high bandwidth" modes (up to 3 packets/uframe). 2808 */ 2809 rtest = wtest = ""; 2810 irtest = iwtest = ""; 2811 intrtest = intwtest = ""; 2812 if (force_interrupt || udev->speed == USB_SPEED_LOW) { 2813 if (info->ep_in) { 2814 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in); 2815 rtest = " intr-in"; 2816 } 2817 if (info->ep_out) { 2818 dev->out_pipe = usb_sndintpipe(udev, info->ep_out); 2819 wtest = " intr-out"; 2820 } 2821 } else { 2822 if (override_alt >= 0 || info->autoconf) { 2823 int status; 2824 2825 status = get_endpoints(dev, intf); 2826 if (status < 0) { 2827 WARNING(dev, "couldn't get endpoints, %d\n", 2828 status); 2829 kfree(dev->buf); 2830 kfree(dev); 2831 return status; 2832 } 2833 /* may find bulk or ISO pipes */ 2834 } else { 2835 if (info->ep_in) 2836 dev->in_pipe = usb_rcvbulkpipe(udev, 2837 info->ep_in); 2838 if (info->ep_out) 2839 dev->out_pipe = usb_sndbulkpipe(udev, 2840 info->ep_out); 2841 } 2842 if (dev->in_pipe) 2843 rtest = " bulk-in"; 2844 if (dev->out_pipe) 2845 wtest = " bulk-out"; 2846 if (dev->in_iso_pipe) 2847 irtest = " iso-in"; 2848 if (dev->out_iso_pipe) 2849 iwtest = " iso-out"; 2850 if (dev->in_int_pipe) 2851 intrtest = " int-in"; 2852 if (dev->out_int_pipe) 2853 intwtest = " int-out"; 2854 } 2855 2856 usb_set_intfdata(intf, dev); 2857 dev_info(&intf->dev, "%s\n", info->name); 2858 dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n", 2859 usb_speed_string(udev->speed), 2860 info->ctrl_out ? " in/out" : "", 2861 rtest, wtest, 2862 irtest, iwtest, 2863 intrtest, intwtest, 2864 info->alt >= 0 ? " (+alt)" : ""); 2865 return 0; 2866 } 2867 2868 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message) 2869 { 2870 return 0; 2871 } 2872 2873 static int usbtest_resume(struct usb_interface *intf) 2874 { 2875 return 0; 2876 } 2877 2878 2879 static void usbtest_disconnect(struct usb_interface *intf) 2880 { 2881 struct usbtest_dev *dev = usb_get_intfdata(intf); 2882 2883 usb_set_intfdata(intf, NULL); 2884 dev_dbg(&intf->dev, "disconnect\n"); 2885 kfree(dev->buf); 2886 kfree(dev); 2887 } 2888 2889 /* Basic testing only needs a device that can source or sink bulk traffic. 2890 * Any device can test control transfers (default with GENERIC binding). 2891 * 2892 * Several entries work with the default EP0 implementation that's built 2893 * into EZ-USB chips. There's a default vendor ID which can be overridden 2894 * by (very) small config EEPROMS, but otherwise all these devices act 2895 * identically until firmware is loaded: only EP0 works. It turns out 2896 * to be easy to make other endpoints work, without modifying that EP0 2897 * behavior. For now, we expect that kind of firmware. 2898 */ 2899 2900 /* an21xx or fx versions of ez-usb */ 2901 static struct usbtest_info ez1_info = { 2902 .name = "EZ-USB device", 2903 .ep_in = 2, 2904 .ep_out = 2, 2905 .alt = 1, 2906 }; 2907 2908 /* fx2 version of ez-usb */ 2909 static struct usbtest_info ez2_info = { 2910 .name = "FX2 device", 2911 .ep_in = 6, 2912 .ep_out = 2, 2913 .alt = 1, 2914 }; 2915 2916 /* ezusb family device with dedicated usb test firmware, 2917 */ 2918 static struct usbtest_info fw_info = { 2919 .name = "usb test device", 2920 .ep_in = 2, 2921 .ep_out = 2, 2922 .alt = 1, 2923 .autoconf = 1, /* iso and ctrl_out need autoconf */ 2924 .ctrl_out = 1, 2925 .iso = 1, /* iso_ep's are #8 in/out */ 2926 }; 2927 2928 /* peripheral running Linux and 'zero.c' test firmware, or 2929 * its user-mode cousin. different versions of this use 2930 * different hardware with the same vendor/product codes. 2931 * host side MUST rely on the endpoint descriptors. 2932 */ 2933 static struct usbtest_info gz_info = { 2934 .name = "Linux gadget zero", 2935 .autoconf = 1, 2936 .ctrl_out = 1, 2937 .iso = 1, 2938 .intr = 1, 2939 .alt = 0, 2940 }; 2941 2942 static struct usbtest_info um_info = { 2943 .name = "Linux user mode test driver", 2944 .autoconf = 1, 2945 .alt = -1, 2946 }; 2947 2948 static struct usbtest_info um2_info = { 2949 .name = "Linux user mode ISO test driver", 2950 .autoconf = 1, 2951 .iso = 1, 2952 .alt = -1, 2953 }; 2954 2955 #ifdef IBOT2 2956 /* this is a nice source of high speed bulk data; 2957 * uses an FX2, with firmware provided in the device 2958 */ 2959 static struct usbtest_info ibot2_info = { 2960 .name = "iBOT2 webcam", 2961 .ep_in = 2, 2962 .alt = -1, 2963 }; 2964 #endif 2965 2966 #ifdef GENERIC 2967 /* we can use any device to test control traffic */ 2968 static struct usbtest_info generic_info = { 2969 .name = "Generic USB device", 2970 .alt = -1, 2971 }; 2972 #endif 2973 2974 2975 static const struct usb_device_id id_table[] = { 2976 2977 /*-------------------------------------------------------------*/ 2978 2979 /* EZ-USB devices which download firmware to replace (or in our 2980 * case augment) the default device implementation. 2981 */ 2982 2983 /* generic EZ-USB FX controller */ 2984 { USB_DEVICE(0x0547, 0x2235), 2985 .driver_info = (unsigned long) &ez1_info, 2986 }, 2987 2988 /* CY3671 development board with EZ-USB FX */ 2989 { USB_DEVICE(0x0547, 0x0080), 2990 .driver_info = (unsigned long) &ez1_info, 2991 }, 2992 2993 /* generic EZ-USB FX2 controller (or development board) */ 2994 { USB_DEVICE(0x04b4, 0x8613), 2995 .driver_info = (unsigned long) &ez2_info, 2996 }, 2997 2998 /* re-enumerated usb test device firmware */ 2999 { USB_DEVICE(0xfff0, 0xfff0), 3000 .driver_info = (unsigned long) &fw_info, 3001 }, 3002 3003 /* "Gadget Zero" firmware runs under Linux */ 3004 { USB_DEVICE(0x0525, 0xa4a0), 3005 .driver_info = (unsigned long) &gz_info, 3006 }, 3007 3008 /* so does a user-mode variant */ 3009 { USB_DEVICE(0x0525, 0xa4a4), 3010 .driver_info = (unsigned long) &um_info, 3011 }, 3012 3013 /* ... and a user-mode variant that talks iso */ 3014 { USB_DEVICE(0x0525, 0xa4a3), 3015 .driver_info = (unsigned long) &um2_info, 3016 }, 3017 3018 #ifdef KEYSPAN_19Qi 3019 /* Keyspan 19qi uses an21xx (original EZ-USB) */ 3020 /* this does not coexist with the real Keyspan 19qi driver! */ 3021 { USB_DEVICE(0x06cd, 0x010b), 3022 .driver_info = (unsigned long) &ez1_info, 3023 }, 3024 #endif 3025 3026 /*-------------------------------------------------------------*/ 3027 3028 #ifdef IBOT2 3029 /* iBOT2 makes a nice source of high speed bulk-in data */ 3030 /* this does not coexist with a real iBOT2 driver! */ 3031 { USB_DEVICE(0x0b62, 0x0059), 3032 .driver_info = (unsigned long) &ibot2_info, 3033 }, 3034 #endif 3035 3036 /*-------------------------------------------------------------*/ 3037 3038 #ifdef GENERIC 3039 /* module params can specify devices to use for control tests */ 3040 { .driver_info = (unsigned long) &generic_info, }, 3041 #endif 3042 3043 /*-------------------------------------------------------------*/ 3044 3045 { } 3046 }; 3047 MODULE_DEVICE_TABLE(usb, id_table); 3048 3049 static struct usb_driver usbtest_driver = { 3050 .name = "usbtest", 3051 .id_table = id_table, 3052 .probe = usbtest_probe, 3053 .unlocked_ioctl = usbtest_ioctl, 3054 .disconnect = usbtest_disconnect, 3055 .suspend = usbtest_suspend, 3056 .resume = usbtest_resume, 3057 }; 3058 3059 /*-------------------------------------------------------------------------*/ 3060 3061 static int __init usbtest_init(void) 3062 { 3063 #ifdef GENERIC 3064 if (vendor) 3065 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product); 3066 #endif 3067 return usb_register(&usbtest_driver); 3068 } 3069 module_init(usbtest_init); 3070 3071 static void __exit usbtest_exit(void) 3072 { 3073 usb_deregister(&usbtest_driver); 3074 } 3075 module_exit(usbtest_exit); 3076 3077 MODULE_DESCRIPTION("USB Core/HCD Testing Driver"); 3078 MODULE_LICENSE("GPL"); 3079 3080