xref: /linux/drivers/usb/misc/usbtest.c (revision d593b5413d13be31782385bf5b27af3b3bad59eb)
1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
10 
11 #include <linux/usb.h>
12 
13 
14 /*-------------------------------------------------------------------------*/
15 
16 /* FIXME make these public somewhere; usbdevfs.h? */
17 struct usbtest_param {
18 	/* inputs */
19 	unsigned		test_num;	/* 0..(TEST_CASES-1) */
20 	unsigned		iterations;
21 	unsigned		length;
22 	unsigned		vary;
23 	unsigned		sglen;
24 
25 	/* outputs */
26 	struct timeval		duration;
27 };
28 #define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
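/*
 * Illustrative sketch (not part of this driver): how a userspace harness
 * might drive these tests through usbfs.  The device node path, interface
 * number, and parameter values below are arbitrary examples; the in-tree
 * testusb tool works along these lines.
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// simple bulk writes
 *		.iterations = 1000,
 *		.length     = 512,
 *		.vary       = 0,
 *		.sglen      = 32,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,	// interface usbtest is bound to
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);	// example path
 *
 *	// usbfs forwards this to usbtest_ioctl() for the bound interface
 *	if (ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("USBTEST_REQUEST");
 */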
29 
30 /*-------------------------------------------------------------------------*/
31 
32 #define	GENERIC		/* let probe() bind using module params */
33 
34 /* Some devices that can be used for testing will have "real" drivers.
35  * Entries for those need to be enabled here by hand, after disabling
36  * that "real" driver.
37  */
38 //#define	IBOT2		/* grab iBOT2 webcams */
39 //#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
40 
41 /*-------------------------------------------------------------------------*/
42 
43 struct usbtest_info {
44 	const char		*name;
45 	u8			ep_in;		/* bulk/intr source */
46 	u8			ep_out;		/* bulk/intr sink */
47 	unsigned		autoconf:1;
48 	unsigned		ctrl_out:1;
49 	unsigned		iso:1;		/* try iso in/out */
50 	int			alt;
51 };
52 
53 /* this is accessed only through usbfs ioctl calls.
54  * one ioctl to issue a test ... one lock per device.
55  * tests create other threads if they need them.
56  * urbs and buffers are allocated dynamically,
57  * and data generated deterministically.
58  */
59 struct usbtest_dev {
60 	struct usb_interface	*intf;
61 	struct usbtest_info	*info;
62 	int			in_pipe;
63 	int			out_pipe;
64 	int			in_iso_pipe;
65 	int			out_iso_pipe;
66 	struct usb_endpoint_descriptor	*iso_in, *iso_out;
67 	struct mutex		lock;
68 
69 #define TBUF_SIZE	256
70 	u8			*buf;
71 };
72 
73 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
74 {
75 	return interface_to_usbdev(test->intf);
76 }
77 
78 /* set up all urbs so they can be used with either bulk or interrupt */
79 #define	INTERRUPT_RATE		1	/* msec/transfer */
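/* Note: urb->interval is counted in frames (1 ms) for full/low speed
 * devices and in microframes (1/8 ms) for high speed ones, which is why
 * INTERRUPT_RATE gets shifted left by 3 when udev->speed == USB_SPEED_HIGH.
 */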
80 
81 #define ERROR(tdev, fmt, args...) \
82 	dev_err(&(tdev)->intf->dev , fmt , ## args)
83 #define WARNING(tdev, fmt, args...) \
84 	dev_warn(&(tdev)->intf->dev , fmt , ## args)
85 
86 #define GUARD_BYTE	0xA5
87 
88 /*-------------------------------------------------------------------------*/
89 
90 static int
91 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
92 {
93 	int				tmp;
94 	struct usb_host_interface	*alt;
95 	struct usb_host_endpoint	*in, *out;
96 	struct usb_host_endpoint	*iso_in, *iso_out;
97 	struct usb_device		*udev;
98 
99 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
100 		unsigned	ep;
101 
102 		in = out = NULL;
103 		iso_in = iso_out = NULL;
104 		alt = intf->altsetting + tmp;
105 
106 		/* take the first altsetting with in-bulk + out-bulk (or iso
107 		 * in/out, when iso testing is enabled); ignore other endpoints and altsettings.
108 		 */
109 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
110 			struct usb_host_endpoint	*e;
111 
112 			e = alt->endpoint + ep;
113 			switch (e->desc.bmAttributes) {
114 			case USB_ENDPOINT_XFER_BULK:
115 				break;
116 			case USB_ENDPOINT_XFER_ISOC:
117 				if (dev->info->iso)
118 					goto try_iso;
119 				/* FALLTHROUGH */
120 			default:
121 				continue;
122 			}
123 			if (usb_endpoint_dir_in(&e->desc)) {
124 				if (!in)
125 					in = e;
126 			} else {
127 				if (!out)
128 					out = e;
129 			}
130 			continue;
131 try_iso:
132 			if (usb_endpoint_dir_in(&e->desc)) {
133 				if (!iso_in)
134 					iso_in = e;
135 			} else {
136 				if (!iso_out)
137 					iso_out = e;
138 			}
139 		}
140 		if ((in && out)  ||  iso_in || iso_out)
141 			goto found;
142 	}
143 	return -EINVAL;
144 
145 found:
146 	udev = testdev_to_usbdev(dev);
147 	if (alt->desc.bAlternateSetting != 0) {
148 		tmp = usb_set_interface(udev,
149 				alt->desc.bInterfaceNumber,
150 				alt->desc.bAlternateSetting);
151 		if (tmp < 0)
152 			return tmp;
153 	}
154 
155 	if (in)
156 		dev->in_pipe = usb_rcvbulkpipe(udev,
157 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
158 	if (out)
159 		dev->out_pipe = usb_sndbulkpipe(udev,
160 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
161 	if (iso_in) {
162 		dev->iso_in = &iso_in->desc;
163 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
164 				iso_in->desc.bEndpointAddress
165 					& USB_ENDPOINT_NUMBER_MASK);
166 	}
167 
168 	if (iso_out) {
169 		dev->iso_out = &iso_out->desc;
170 		dev->out_iso_pipe = usb_sndisocpipe(udev,
171 				iso_out->desc.bEndpointAddress
172 					& USB_ENDPOINT_NUMBER_MASK);
173 	}
174 	return 0;
175 }
176 
177 /*-------------------------------------------------------------------------*/
178 
179 /* Support for testing basic non-queued I/O streams.
180  *
181  * These just package urbs as requests that can be easily canceled.
182  * Each urb's data buffer is dynamically allocated; callers can fill
183  * them with non-zero test data (or test for it) when appropriate.
184  */
185 
186 static void simple_callback(struct urb *urb)
187 {
188 	complete(urb->context);
189 }
190 
191 static struct urb *usbtest_alloc_urb(
192 	struct usb_device	*udev,
193 	int			pipe,
194 	unsigned long		bytes,
195 	unsigned		transfer_flags,
196 	unsigned		offset)
197 {
198 	struct urb		*urb;
199 
200 	urb = usb_alloc_urb(0, GFP_KERNEL);
201 	if (!urb)
202 		return urb;
203 	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
204 	urb->interval = (udev->speed == USB_SPEED_HIGH)
205 			? (INTERRUPT_RATE << 3)
206 			: INTERRUPT_RATE;
207 	urb->transfer_flags = transfer_flags;
208 	if (usb_pipein(pipe))
209 		urb->transfer_flags |= URB_SHORT_NOT_OK;
210 
211 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
212 		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
213 			GFP_KERNEL, &urb->transfer_dma);
214 	else
215 		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
216 
217 	if (!urb->transfer_buffer) {
218 		usb_free_urb(urb);
219 		return NULL;
220 	}
221 
222 	/* To test unaligned transfers add an offset and fill the
223 		unused memory with a guard value */
224 	if (offset) {
225 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
226 		urb->transfer_buffer += offset;
227 		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
228 			urb->transfer_dma += offset;
229 	}
230 
231 	/* For inbound transfers use guard byte so that test fails if
232 		data not correctly copied */
233 	memset(urb->transfer_buffer,
234 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
235 			bytes);
236 	return urb;
237 }
238 
239 static struct urb *simple_alloc_urb(
240 	struct usb_device	*udev,
241 	int			pipe,
242 	unsigned long		bytes)
243 {
244 	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
245 }
246 
247 static unsigned pattern;
248 static unsigned mod_pattern;
249 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
250 MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
251 
252 static inline void simple_fill_buf(struct urb *urb)
253 {
254 	unsigned	i;
255 	u8		*buf = urb->transfer_buffer;
256 	unsigned	len = urb->transfer_buffer_length;
257 
258 	switch (pattern) {
259 	default:
260 		/* FALLTHROUGH */
261 	case 0:
262 		memset(buf, 0, len);
263 		break;
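	/* 63 is chosen because none of the usual bulk maxpacket sizes
	 * (8/16/32/64/512) is a multiple of it, so the pattern phase
	 * shifts from packet to packet and a silently dropped or
	 * repeated packet shows up as a mismatch in simple_check_buf().
	 */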
264 	case 1:			/* mod63 */
265 		for (i = 0; i < len; i++)
266 			*buf++ = (u8) (i % 63);
267 		break;
268 	}
269 }
270 
271 static inline unsigned long buffer_offset(void *buf)
272 {
273 	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
274 }
275 
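/* Verify that the GUARD_BYTE padding placed in front of an unaligned
 * transfer buffer (see usbtest_alloc_urb) was not overwritten during
 * the transfer.
 */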
276 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
277 {
278 	u8 *buf = urb->transfer_buffer;
279 	u8 *guard = buf - buffer_offset(buf);
280 	unsigned i;
281 
282 	for (i = 0; guard < buf; i++, guard++) {
283 		if (*guard != GUARD_BYTE) {
284 			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
285 				i, *guard, GUARD_BYTE);
286 			return -EINVAL;
287 		}
288 	}
289 	return 0;
290 }
291 
292 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
293 {
294 	unsigned	i;
295 	u8		expected;
296 	u8		*buf = urb->transfer_buffer;
297 	unsigned	len = urb->actual_length;
298 
299 	int ret = check_guard_bytes(tdev, urb);
300 	if (ret)
301 		return ret;
302 
303 	for (i = 0; i < len; i++, buf++) {
304 		switch (pattern) {
305 		/* all-zeroes has no synchronization issues */
306 		case 0:
307 			expected = 0;
308 			break;
309 		/* mod63 stays in sync with short-terminated transfers,
310 		 * or otherwise when host and gadget agree on how large
311 		 * each usb transfer request should be.  resync is done
312 		 * with set_interface or set_config.
313 		 */
314 		case 1:			/* mod63 */
315 			expected = i % 63;
316 			break;
317 		/* always fail unsupported patterns */
318 		default:
319 			expected = !*buf;
320 			break;
321 		}
322 		if (*buf == expected)
323 			continue;
324 		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
325 		return -EINVAL;
326 	}
327 	return 0;
328 }
329 
330 static void simple_free_urb(struct urb *urb)
331 {
332 	unsigned long offset = buffer_offset(urb->transfer_buffer);
333 
334 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
335 		usb_free_coherent(
336 			urb->dev,
337 			urb->transfer_buffer_length + offset,
338 			urb->transfer_buffer - offset,
339 			urb->transfer_dma - offset);
340 	else
341 		kfree(urb->transfer_buffer - offset);
342 	usb_free_urb(urb);
343 }
344 
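/* Synchronously run one urb through @iterations submit/complete cycles.
 * A nonzero @vary bumps the transfer length by that amount each pass,
 * wrapping at the allocated size, so successive transfers end on
 * different packet boundaries.  The final status is returned; a mismatch
 * against @expected is logged as an error.
 */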
345 static int simple_io(
346 	struct usbtest_dev	*tdev,
347 	struct urb		*urb,
348 	int			iterations,
349 	int			vary,
350 	int			expected,
351 	const char		*label
352 )
353 {
354 	struct usb_device	*udev = urb->dev;
355 	int			max = urb->transfer_buffer_length;
356 	struct completion	completion;
357 	int			retval = 0;
358 
359 	urb->context = &completion;
360 	while (retval == 0 && iterations-- > 0) {
361 		init_completion(&completion);
362 		if (usb_pipeout(urb->pipe))
363 			simple_fill_buf(urb);
364 		retval = usb_submit_urb(urb, GFP_KERNEL);
365 		if (retval != 0)
366 			break;
367 
368 		/* NOTE:  no timeouts; can't be broken out of by interrupt */
369 		wait_for_completion(&completion);
370 		retval = urb->status;
371 		urb->dev = udev;
372 		if (retval == 0 && usb_pipein(urb->pipe))
373 			retval = simple_check_buf(tdev, urb);
374 
375 		if (vary) {
376 			int	len = urb->transfer_buffer_length;
377 
378 			len += vary;
379 			len %= max;
380 			if (len == 0)
381 				len = (vary < max) ? vary : max;
382 			urb->transfer_buffer_length = len;
383 		}
384 
385 		/* FIXME if endpoint halted, clear halt (and log) */
386 	}
387 	urb->transfer_buffer_length = max;
388 
389 	if (expected != retval)
390 		dev_err(&udev->dev,
391 			"%s failed, iterations left %d, status %d (not %d)\n",
392 				label, iterations, retval, expected);
393 	return retval;
394 }
395 
396 
397 /*-------------------------------------------------------------------------*/
398 
399 /* We use scatterlist primitives to test queued I/O.
400  * Yes, this also tests the scatterlist primitives.
401  */
402 
403 static void free_sglist(struct scatterlist *sg, int nents)
404 {
405 	unsigned		i;
406 
407 	if (!sg)
408 		return;
409 	for (i = 0; i < nents; i++) {
410 		if (!sg_page(&sg[i]))
411 			continue;
412 		kfree(sg_virt(&sg[i]));
413 	}
414 	kfree(sg);
415 }
416 
417 static struct scatterlist *
418 alloc_sglist(int nents, int max, int vary)
419 {
420 	struct scatterlist	*sg;
421 	unsigned		i;
422 	unsigned		size = max;
423 
424 	sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
425 	if (!sg)
426 		return NULL;
427 	sg_init_table(sg, nents);
428 
429 	for (i = 0; i < nents; i++) {
430 		char		*buf;
431 		unsigned	j;
432 
433 		buf = kzalloc(size, GFP_KERNEL);
434 		if (!buf) {
435 			free_sglist(sg, i);
436 			return NULL;
437 		}
438 
439 		/* kmalloc pages are always physically contiguous! */
440 		sg_set_buf(&sg[i], buf, size);
441 
442 		switch (pattern) {
443 		case 0:
444 			/* already zeroed */
445 			break;
446 		case 1:
447 			for (j = 0; j < size; j++)
448 				*buf++ = (u8) (j % 63);
449 			break;
450 		}
451 
452 		if (vary) {
453 			size += vary;
454 			size %= max;
455 			if (size == 0)
456 				size = (vary < max) ? vary : max;
457 		}
458 	}
459 
460 	return sg;
461 }
462 
463 static int perform_sglist(
464 	struct usbtest_dev	*tdev,
465 	unsigned		iterations,
466 	int			pipe,
467 	struct usb_sg_request	*req,
468 	struct scatterlist	*sg,
469 	int			nents
470 )
471 {
472 	struct usb_device	*udev = testdev_to_usbdev(tdev);
473 	int			retval = 0;
474 
475 	while (retval == 0 && iterations-- > 0) {
476 		retval = usb_sg_init(req, udev, pipe,
477 				(udev->speed == USB_SPEED_HIGH)
478 					? (INTERRUPT_RATE << 3)
479 					: INTERRUPT_RATE,
480 				sg, nents, 0, GFP_KERNEL);
481 
482 		if (retval)
483 			break;
484 		usb_sg_wait(req);
485 		retval = req->status;
486 
487 		/* FIXME check resulting data pattern */
488 
489 		/* FIXME if endpoint halted, clear halt (and log) */
490 	}
491 
492 	/* FIXME for unlink or fault handling tests, don't report
493 	 * failure if retval is as we expected ...
494 	 */
495 	if (retval)
496 		ERROR(tdev, "perform_sglist failed, "
497 				"iterations left %d, status %d\n",
498 				iterations, retval);
499 	return retval;
500 }
501 
502 
503 /*-------------------------------------------------------------------------*/
504 
505 /* unqueued control message testing
506  *
507  * there's a nice set of device functional requirements in chapter 9 of the
508  * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
509  * special test firmware.
510  *
511  * we know the device is configured (or suspended) by the time it's visible
512  * through usbfs.  we can't change that, so we won't test enumeration (which
513  * worked 'well enough' to get here, this time), power management (ditto),
514  * or remote wakeup (which needs human interaction).
515  */
516 
517 static unsigned realworld = 1;
518 module_param(realworld, uint, 0);
519 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
520 
521 static int get_altsetting(struct usbtest_dev *dev)
522 {
523 	struct usb_interface	*iface = dev->intf;
524 	struct usb_device	*udev = interface_to_usbdev(iface);
525 	int			retval;
526 
527 	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
528 			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
529 			0, iface->altsetting[0].desc.bInterfaceNumber,
530 			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
531 	switch (retval) {
532 	case 1:
533 		return dev->buf[0];
534 	case 0:
535 		retval = -ERANGE;
536 		/* FALLTHROUGH */
537 	default:
538 		return retval;
539 	}
540 }
541 
542 static int set_altsetting(struct usbtest_dev *dev, int alternate)
543 {
544 	struct usb_interface		*iface = dev->intf;
545 	struct usb_device		*udev;
546 
547 	if (alternate < 0 || alternate >= 256)
548 		return -EINVAL;
549 
550 	udev = interface_to_usbdev(iface);
551 	return usb_set_interface(udev,
552 			iface->altsetting[0].desc.bInterfaceNumber,
553 			alternate);
554 }
555 
556 static int is_good_config(struct usbtest_dev *tdev, int len)
557 {
558 	struct usb_config_descriptor	*config;
559 
560 	if (len < sizeof *config)
561 		return 0;
562 	config = (struct usb_config_descriptor *) tdev->buf;
563 
564 	switch (config->bDescriptorType) {
565 	case USB_DT_CONFIG:
566 	case USB_DT_OTHER_SPEED_CONFIG:
567 		if (config->bLength != 9) {
568 			ERROR(tdev, "bogus config descriptor length\n");
569 			return 0;
570 		}
571 		/* this bit 'must be 1' but often isn't */
572 		if (!realworld && !(config->bmAttributes & 0x80)) {
573 			ERROR(tdev, "high bit of config attributes not set\n");
574 			return 0;
575 		}
576 		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
577 			ERROR(tdev, "reserved config bits set\n");
578 			return 0;
579 		}
580 		break;
581 	default:
582 		return 0;
583 	}
584 
585 	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
586 		return 1;
587 	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
588 		return 1;
589 	ERROR(tdev, "bogus config descriptor read size\n");
590 	return 0;
591 }
592 
593 /* sanity test for standard requests working with usb_control_msg() and some
594  * of the utility functions which use it.
595  *
596  * this doesn't test how endpoint halts behave or data toggles get set, since
597  * we won't do I/O to bulk/interrupt endpoints here (which is how to change
598  * halt or toggle).  toggle testing is impractical without support from hcds.
599  *
600  * this avoids failing devices linux would normally work with, by not testing
601  * config/altsetting operations for devices that only support their defaults.
602  * such devices rarely support those needless operations.
603  *
604  * NOTE that since this is a sanity test, it's not examining boundary cases
605  * to see if usbcore, hcd, and device all behave right.  such testing would
606  * involve varied read sizes and other operation sequences.
607  */
608 static int ch9_postconfig(struct usbtest_dev *dev)
609 {
610 	struct usb_interface	*iface = dev->intf;
611 	struct usb_device	*udev = interface_to_usbdev(iface);
612 	int			i, alt, retval;
613 
614 	/* [9.2.3] if there's more than one altsetting, we need to be able to
615 	 * set and get each one.  mostly trusts the descriptors from usbcore.
616 	 */
617 	for (i = 0; i < iface->num_altsetting; i++) {
618 
619 		/* 9.2.3 constrains the range here */
620 		alt = iface->altsetting[i].desc.bAlternateSetting;
621 		if (alt < 0 || alt >= iface->num_altsetting) {
622 			dev_err(&iface->dev,
623 					"invalid alt [%d].bAltSetting = %d\n",
624 					i, alt);
625 		}
626 
627 		/* [real world] get/set unimplemented if there's only one */
628 		if (realworld && iface->num_altsetting == 1)
629 			continue;
630 
631 		/* [9.4.10] set_interface */
632 		retval = set_altsetting(dev, alt);
633 		if (retval) {
634 			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
635 					alt, retval);
636 			return retval;
637 		}
638 
639 		/* [9.4.4] get_interface always works */
640 		retval = get_altsetting(dev);
641 		if (retval != alt) {
642 			dev_err(&iface->dev, "get alt should be %d, was %d\n",
643 					alt, retval);
644 			return (retval < 0) ? retval : -EDOM;
645 		}
646 
647 	}
648 
649 	/* [real world] get_config unimplemented if there's only one */
650 	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
651 		int	expected = udev->actconfig->desc.bConfigurationValue;
652 
653 		/* [9.4.2] get_configuration always works
654 		 * ... although some cheap devices (like one TI Hub I've got)
655 		 * won't return config descriptors except before set_config.
656 		 */
657 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
658 				USB_REQ_GET_CONFIGURATION,
659 				USB_DIR_IN | USB_RECIP_DEVICE,
660 				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
661 		if (retval != 1 || dev->buf[0] != expected) {
662 			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
663 				retval, dev->buf[0], expected);
664 			return (retval < 0) ? retval : -EDOM;
665 		}
666 	}
667 
668 	/* there's always [9.4.3] a device descriptor [9.6.1] */
669 	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
670 			dev->buf, sizeof udev->descriptor);
671 	if (retval != sizeof udev->descriptor) {
672 		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
673 		return (retval < 0) ? retval : -EDOM;
674 	}
675 
676 	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
677 	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
678 		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
679 				dev->buf, TBUF_SIZE);
680 		if (!is_good_config(dev, retval)) {
681 			dev_err(&iface->dev,
682 					"config [%d] descriptor --> %d\n",
683 					i, retval);
684 			return (retval < 0) ? retval : -EDOM;
685 		}
686 
687 		/* FIXME cross-checking udev->config[i] to make sure usbcore
688 		 * parsed it right (etc) would be good testing paranoia
689 		 */
690 	}
691 
692 	/* and sometimes [9.2.6.6] speed dependent descriptors */
693 	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
694 		struct usb_qualifier_descriptor *d = NULL;
695 
696 		/* device qualifier [9.6.2] */
697 		retval = usb_get_descriptor(udev,
698 				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
699 				sizeof(struct usb_qualifier_descriptor));
700 		if (retval == -EPIPE) {
701 			if (udev->speed == USB_SPEED_HIGH) {
702 				dev_err(&iface->dev,
703 						"hs dev qualifier --> %d\n",
704 						retval);
705 				return (retval < 0) ? retval : -EDOM;
706 			}
707 			/* usb2.0 but not high-speed capable; fine */
708 		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
709 			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
710 			return (retval < 0) ? retval : -EDOM;
711 		} else
712 			d = (struct usb_qualifier_descriptor *) dev->buf;
713 
714 		/* might not have [9.6.2] any other-speed configs [9.6.4] */
715 		if (d) {
716 			unsigned max = d->bNumConfigurations;
717 			for (i = 0; i < max; i++) {
718 				retval = usb_get_descriptor(udev,
719 					USB_DT_OTHER_SPEED_CONFIG, i,
720 					dev->buf, TBUF_SIZE);
721 				if (!is_good_config(dev, retval)) {
722 					dev_err(&iface->dev,
723 						"other speed config --> %d\n",
724 						retval);
725 					return (retval < 0) ? retval : -EDOM;
726 				}
727 			}
728 		}
729 	}
730 	/* FIXME fetch strings from at least the device descriptor */
731 
732 	/* [9.4.5] get_status always works */
733 	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
734 	if (retval != 2) {
735 		dev_err(&iface->dev, "get dev status --> %d\n", retval);
736 		return (retval < 0) ? retval : -EDOM;
737 	}
738 
739 	/* FIXME configuration.bmAttributes says if we could try to set/clear
740 	 * the device's remote wakeup feature ... if we can, test that here
741 	 */
742 
743 	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
744 			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
745 	if (retval != 2) {
746 		dev_err(&iface->dev, "get interface status --> %d\n", retval);
747 		return (retval < 0) ? retval : -EDOM;
748 	}
749 	/* FIXME get status for each endpoint in the interface */
750 
751 	return 0;
752 }
753 
754 /*-------------------------------------------------------------------------*/
755 
756 /* use ch9 requests to test whether:
757  *   (a) queues work for control, keeping N subtests queued and
758  *       active (auto-resubmit) for M loops through the queue.
759  *   (b) protocol stalls (control-only) will autorecover.
760  *       it's not like bulk/intr; no halt clearing.
761  *   (c) short control reads are reported and handled.
762  *   (d) queues are always processed in-order
763  */
764 
765 struct ctrl_ctx {
766 	spinlock_t		lock;
767 	struct usbtest_dev	*dev;
768 	struct completion	complete;
769 	unsigned		count;
770 	unsigned		pending;
771 	int			status;
772 	struct urb		**urb;
773 	struct usbtest_param	*param;
774 	int			last;
775 };
776 
777 #define NUM_SUBCASES	15		/* how many test subcases here? */
778 
779 struct subcase {
780 	struct usb_ctrlrequest	setup;
781 	int			number;
782 	int			expected;
783 };
784 
785 static void ctrl_complete(struct urb *urb)
786 {
787 	struct ctrl_ctx		*ctx = urb->context;
788 	struct usb_ctrlrequest	*reqp;
789 	struct subcase		*subcase;
790 	int			status = urb->status;
791 
792 	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
793 	subcase = container_of(reqp, struct subcase, setup);
794 
795 	spin_lock(&ctx->lock);
796 	ctx->count--;
797 	ctx->pending--;
798 
799 	/* queue must transfer and complete in fifo order, unless
800 	 * usb_unlink_urb() is used to unlink something not at the
801 	 * physical queue head (not tested).
802 	 */
803 	if (subcase->number > 0) {
804 		if ((subcase->number - ctx->last) != 1) {
805 			ERROR(ctx->dev,
806 				"subcase %d completed out of order, last %d\n",
807 				subcase->number, ctx->last);
808 			status = -EDOM;
809 			ctx->last = subcase->number;
810 			goto error;
811 		}
812 	}
813 	ctx->last = subcase->number;
814 
815 	/* succeed or fault in only one way? */
816 	if (status == subcase->expected)
817 		status = 0;
818 
819 	/* async unlink for cleanup? */
820 	else if (status != -ECONNRESET) {
821 
822 		/* some faults are allowed, not required */
823 		if (subcase->expected > 0 && (
824 			  ((status == -subcase->expected	/* happened */
825 			   || status == 0))))			/* didn't */
826 			status = 0;
827 		/* sometimes more than one fault is allowed */
828 		else if (subcase->number == 12 && status == -EPIPE)
829 			status = 0;
830 		else
831 			ERROR(ctx->dev, "subtest %d error, status %d\n",
832 					subcase->number, status);
833 	}
834 
835 	/* unexpected status codes mean errors; ideally, in hardware */
836 	if (status) {
837 error:
838 		if (ctx->status == 0) {
839 			int		i;
840 
841 			ctx->status = status;
842 			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
843 					"%d left, subcase %d, len %d/%d\n",
844 					reqp->bRequestType, reqp->bRequest,
845 					status, ctx->count, subcase->number,
846 					urb->actual_length,
847 					urb->transfer_buffer_length);
848 
849 			/* FIXME this "unlink everything" exit route should
850 			 * be a separate test case.
851 			 */
852 
853 			/* unlink whatever's still pending */
854 			for (i = 1; i < ctx->param->sglen; i++) {
855 				struct urb *u = ctx->urb[
856 							(i + subcase->number)
857 							% ctx->param->sglen];
858 
859 				if (u == urb || !u->dev)
860 					continue;
861 				spin_unlock(&ctx->lock);
862 				status = usb_unlink_urb(u);
863 				spin_lock(&ctx->lock);
864 				switch (status) {
865 				case -EINPROGRESS:
866 				case -EBUSY:
867 				case -EIDRM:
868 					continue;
869 				default:
870 					ERROR(ctx->dev, "urb unlink --> %d\n",
871 							status);
872 				}
873 			}
874 			status = ctx->status;
875 		}
876 	}
877 
878 	/* resubmit if we need to, else mark this as done */
879 	if ((status == 0) && (ctx->pending < ctx->count)) {
880 		status = usb_submit_urb(urb, GFP_ATOMIC);
881 		if (status != 0) {
882 			ERROR(ctx->dev,
883 				"can't resubmit ctrl %02x.%02x, err %d\n",
884 				reqp->bRequestType, reqp->bRequest, status);
885 			urb->dev = NULL;
886 		} else
887 			ctx->pending++;
888 	} else
889 		urb->dev = NULL;
890 
891 	/* signal completion when nothing's queued */
892 	if (ctx->pending == 0)
893 		complete(&ctx->complete);
894 	spin_unlock(&ctx->lock);
895 }
896 
897 static int
898 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
899 {
900 	struct usb_device	*udev = testdev_to_usbdev(dev);
901 	struct urb		**urb;
902 	struct ctrl_ctx		context;
903 	int			i;
904 
905 	spin_lock_init(&context.lock);
906 	context.dev = dev;
907 	init_completion(&context.complete);
908 	context.count = param->sglen * param->iterations;
909 	context.pending = 0;
910 	context.status = -ENOMEM;
911 	context.param = param;
912 	context.last = -1;
913 
914 	/* allocate and init the urbs we'll queue.
915 	 * as with bulk/intr sglists, sglen is the queue depth; it also
916 	 * controls which subtests run (more tests than sglen) or rerun.
917 	 */
918 	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
919 	if (!urb)
920 		return -ENOMEM;
921 	for (i = 0; i < param->sglen; i++) {
922 		int			pipe = usb_rcvctrlpipe(udev, 0);
923 		unsigned		len;
924 		struct urb		*u;
925 		struct usb_ctrlrequest	req;
926 		struct subcase		*reqp;
927 
928 		/* sign of this variable means:
929 		 *  -: tested code must return this (negative) error code
930 		 *  +: tested code may return this (negative too) error code
931 		 */
932 		int			expected = 0;
933 
934 		/* requests here are mostly expected to succeed on any
935 		 * device, but some are chosen to trigger protocol stalls
936 		 * or short reads.
937 		 */
938 		memset(&req, 0, sizeof req);
939 		req.bRequest = USB_REQ_GET_DESCRIPTOR;
940 		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
941 
942 		switch (i % NUM_SUBCASES) {
943 		case 0:		/* get device descriptor */
944 			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
945 			len = sizeof(struct usb_device_descriptor);
946 			break;
947 		case 1:		/* get first config descriptor (only) */
948 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
949 			len = sizeof(struct usb_config_descriptor);
950 			break;
951 		case 2:		/* get altsetting (OFTEN STALLS) */
952 			req.bRequest = USB_REQ_GET_INTERFACE;
953 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
954 			/* index = 0 means first interface */
955 			len = 1;
956 			expected = EPIPE;
957 			break;
958 		case 3:		/* get interface status */
959 			req.bRequest = USB_REQ_GET_STATUS;
960 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
961 			/* interface 0 */
962 			len = 2;
963 			break;
964 		case 4:		/* get device status */
965 			req.bRequest = USB_REQ_GET_STATUS;
966 			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
967 			len = 2;
968 			break;
969 		case 5:		/* get device qualifier (MAY STALL) */
970 			req.wValue = cpu_to_le16(USB_DT_DEVICE_QUALIFIER << 8);
971 			len = sizeof(struct usb_qualifier_descriptor);
972 			if (udev->speed != USB_SPEED_HIGH)
973 				expected = EPIPE;
974 			break;
975 		case 6:		/* get first config descriptor, plus interface */
976 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
977 			len = sizeof(struct usb_config_descriptor);
978 			len += sizeof(struct usb_interface_descriptor);
979 			break;
980 		case 7:		/* get interface descriptor (ALWAYS STALLS) */
981 			req.wValue = cpu_to_le16(USB_DT_INTERFACE << 8);
982 			/* interface == 0 */
983 			len = sizeof(struct usb_interface_descriptor);
984 			expected = -EPIPE;
985 			break;
986 		/* NOTE: two consecutive stalls in the queue here.
987 		 *  that tests fault recovery a bit more aggressively. */
988 		case 8:		/* clear endpoint halt (MAY STALL) */
989 			req.bRequest = USB_REQ_CLEAR_FEATURE;
990 			req.bRequestType = USB_RECIP_ENDPOINT;
991 			/* wValue 0 == ep halt */
992 			/* wIndex 0 == ep0 (shouldn't halt!) */
993 			len = 0;
994 			pipe = usb_sndctrlpipe(udev, 0);
995 			expected = EPIPE;
996 			break;
997 		case 9:		/* get endpoint status */
998 			req.bRequest = USB_REQ_GET_STATUS;
999 			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1000 			/* endpoint 0 */
1001 			len = 2;
1002 			break;
1003 		case 10:	/* trigger short read (EREMOTEIO) */
1004 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1005 			len = 1024;
1006 			expected = -EREMOTEIO;
1007 			break;
1008 		/* NOTE: two consecutive _different_ faults in the queue. */
1009 		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1010 			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1011 			/* endpoint == 0 */
1012 			len = sizeof(struct usb_interface_descriptor);
1013 			expected = EPIPE;
1014 			break;
1015 		/* NOTE: sometimes even a third fault in the queue! */
1016 		case 12:	/* get string 0 descriptor (MAY STALL) */
1017 			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1018 			/* string == 0, for language IDs */
1019 			len = sizeof(struct usb_interface_descriptor);
1020 			/* may succeed when > 4 languages */
1021 			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1022 			break;
1023 		case 13:	/* short read, resembling case 10 */
1024 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1025 			/* last data packet "should" be DATA1, not DATA0 */
1026 			len = 1024 - udev->descriptor.bMaxPacketSize0;
1027 			expected = -EREMOTEIO;
1028 			break;
1029 		case 14:	/* short read; try to fill the last packet */
1030 			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1031 			/* device descriptor size == 18 bytes */
1032 			len = udev->descriptor.bMaxPacketSize0;
1033 			if (udev->speed == USB_SPEED_SUPER)
1034 				len = 512;
1035 			switch (len) {
1036 			case 8:
1037 				len = 24;
1038 				break;
1039 			case 16:
1040 				len = 32;
1041 				break;
1042 			}
1043 			expected = -EREMOTEIO;
1044 			break;
1045 		default:
1046 			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1047 			context.status = -EINVAL;
1048 			goto cleanup;
1049 		}
1050 		req.wLength = cpu_to_le16(len);
1051 		urb[i] = u = simple_alloc_urb(udev, pipe, len);
1052 		if (!u)
1053 			goto cleanup;
1054 
1055 		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
1056 		if (!reqp)
1057 			goto cleanup;
1058 		reqp->setup = req;
1059 		reqp->number = i % NUM_SUBCASES;
1060 		reqp->expected = expected;
1061 		u->setup_packet = (char *) &reqp->setup;
1062 
1063 		u->context = &context;
1064 		u->complete = ctrl_complete;
1065 	}
1066 
1067 	/* queue the urbs */
1068 	context.urb = urb;
1069 	spin_lock_irq(&context.lock);
1070 	for (i = 0; i < param->sglen; i++) {
1071 		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1072 		if (context.status != 0) {
1073 			ERROR(dev, "can't submit urb[%d], status %d\n",
1074 					i, context.status);
1075 			context.count = context.pending;
1076 			break;
1077 		}
1078 		context.pending++;
1079 	}
1080 	spin_unlock_irq(&context.lock);
1081 
1082 	/* FIXME  set timer and time out; provide a disconnect hook */
1083 
1084 	/* wait for the last one to complete */
1085 	if (context.pending > 0)
1086 		wait_for_completion(&context.complete);
1087 
1088 cleanup:
1089 	for (i = 0; i < param->sglen; i++) {
1090 		if (!urb[i])
1091 			continue;
1092 		urb[i]->dev = udev;
1093 		kfree(urb[i]->setup_packet);
1094 		simple_free_urb(urb[i]);
1095 	}
1096 	kfree(urb);
1097 	return context.status;
1098 }
1099 #undef NUM_SUBCASES
1100 
1101 
1102 /*-------------------------------------------------------------------------*/
1103 
1104 static void unlink1_callback(struct urb *urb)
1105 {
1106 	int	status = urb->status;
1107 
1108 	/* we "know" -EPIPE (stall) never happens */
1109 	if (!status)
1110 		status = usb_submit_urb(urb, GFP_ATOMIC);
1111 	if (status) {
1112 		urb->status = status;
1113 		complete(urb->context);
1114 	}
1115 }
1116 
1117 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1118 {
1119 	struct urb		*urb;
1120 	struct completion	completion;
1121 	int			retval = 0;
1122 
1123 	init_completion(&completion);
1124 	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1125 	if (!urb)
1126 		return -ENOMEM;
1127 	urb->context = &completion;
1128 	urb->complete = unlink1_callback;
1129 
1130 	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1131 	 * states, and testing should get to all of them over time.
1132 	 *
1133 	 * FIXME want additional tests for when endpoint is STALLing
1134 	 * due to errors, or is just NAKing requests.
1135 	 */
1136 	retval = usb_submit_urb(urb, GFP_KERNEL);
1137 	if (retval != 0) {
1138 		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1139 		return retval;
1140 	}
1141 
1142 	/* unlinking that should always work.  variable delay tests more
1143 	 * hcd states and code paths, even with little other system load.
1144 	 */
1145 	msleep(jiffies % (2 * INTERRUPT_RATE));
1146 	if (async) {
1147 		while (!completion_done(&completion)) {
1148 			retval = usb_unlink_urb(urb);
1149 
1150 			switch (retval) {
1151 			case -EBUSY:
1152 			case -EIDRM:
1153 				/* we can't unlink urbs while they're completing
1154 				 * or if they've completed, and we haven't
1155 				 * resubmitted. "normal" drivers would prevent
1156 				 * resubmission, but since we're testing unlink
1157 				 * paths, we can't.
1158 				 */
1159 				ERROR(dev, "unlink retry\n");
1160 				continue;
1161 			case 0:
1162 			case -EINPROGRESS:
1163 				break;
1164 
1165 			default:
1166 				dev_err(&dev->intf->dev,
1167 					"unlink fail %d\n", retval);
1168 				return retval;
1169 			}
1170 
1171 			break;
1172 		}
1173 	} else
1174 		usb_kill_urb(urb);
1175 
1176 	wait_for_completion(&completion);
1177 	retval = urb->status;
1178 	simple_free_urb(urb);
1179 
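	/* An urb unlinked with usb_unlink_urb() completes with -ECONNRESET,
	 * while a synchronous usb_kill_urb() is expected to yield -ENOENT
	 * (the code also accepts -EPERM).  Anything else is a failure; the
	 * -1000/-2000 offsets mark which of the two paths misbehaved.
	 */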
1180 	if (async)
1181 		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1182 	else
1183 		return (retval == -ENOENT || retval == -EPERM) ?
1184 				0 : retval - 2000;
1185 }
1186 
1187 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1188 {
1189 	int			retval = 0;
1190 
1191 	/* test sync and async paths */
1192 	retval = unlink1(dev, pipe, len, 1);
1193 	if (!retval)
1194 		retval = unlink1(dev, pipe, len, 0);
1195 	return retval;
1196 }
1197 
1198 /*-------------------------------------------------------------------------*/
1199 
1200 struct queued_ctx {
1201 	struct completion	complete;
1202 	atomic_t		pending;
1203 	unsigned		num;
1204 	int			status;
1205 	struct urb		**urbs;
1206 };
1207 
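/* Completion handler for unlink_queued().  The two urbs that get unlinked
 * (indices num - 4 and num - 2) are expected to finish with -ECONNRESET;
 * any other nonzero status on any urb is recorded as the test result.
 */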
1208 static void unlink_queued_callback(struct urb *urb)
1209 {
1210 	int			status = urb->status;
1211 	struct queued_ctx	*ctx = urb->context;
1212 
1213 	if (ctx->status)
1214 		goto done;
1215 	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1216 		if (status == -ECONNRESET)
1217 			goto done;
1218 		/* What error should we report if the URB completed normally? */
1219 	}
1220 	if (status != 0)
1221 		ctx->status = status;
1222 
1223  done:
1224 	if (atomic_dec_and_test(&ctx->pending))
1225 		complete(&ctx->complete);
1226 }
1227 
1228 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1229 		unsigned size)
1230 {
1231 	struct queued_ctx	ctx;
1232 	struct usb_device	*udev = testdev_to_usbdev(dev);
1233 	void			*buf;
1234 	dma_addr_t		buf_dma;
1235 	int			i;
1236 	int			retval = -ENOMEM;
1237 
1238 	init_completion(&ctx.complete);
1239 	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1240 	ctx.num = num;
1241 	ctx.status = 0;
1242 
1243 	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1244 	if (!buf)
1245 		return retval;
1246 	memset(buf, 0, size);
1247 
1248 	/* Allocate and init the urbs we'll queue */
1249 	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1250 	if (!ctx.urbs)
1251 		goto free_buf;
1252 	for (i = 0; i < num; i++) {
1253 		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1254 		if (!ctx.urbs[i])
1255 			goto free_urbs;
1256 		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1257 				unlink_queued_callback, &ctx);
1258 		ctx.urbs[i]->transfer_dma = buf_dma;
1259 		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1260 	}
1261 
1262 	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1263 	for (i = 0; i < num; i++) {
1264 		atomic_inc(&ctx.pending);
1265 		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1266 		if (retval != 0) {
1267 			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1268 					i, retval);
1269 			atomic_dec(&ctx.pending);
1270 			ctx.status = retval;
1271 			break;
1272 		}
1273 	}
1274 	if (i == num) {
1275 		usb_unlink_urb(ctx.urbs[num - 4]);
1276 		usb_unlink_urb(ctx.urbs[num - 2]);
1277 	} else {
1278 		while (--i >= 0)
1279 			usb_unlink_urb(ctx.urbs[i]);
1280 	}
1281 
1282 	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1283 		complete(&ctx.complete);
1284 	wait_for_completion(&ctx.complete);
1285 	retval = ctx.status;
1286 
1287  free_urbs:
1288 	for (i = 0; i < num; i++)
1289 		usb_free_urb(ctx.urbs[i]);
1290 	kfree(ctx.urbs);
1291  free_buf:
1292 	usb_free_coherent(udev, size, buf, buf_dma);
1293 	return retval;
1294 }
1295 
1296 /*-------------------------------------------------------------------------*/
1297 
1298 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1299 {
1300 	int	retval;
1301 	u16	status;
1302 
1303 	/* shouldn't look or act halted */
1304 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1305 	if (retval < 0) {
1306 		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1307 				ep, retval);
1308 		return retval;
1309 	}
1310 	if (status != 0) {
1311 		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1312 		return -EINVAL;
1313 	}
1314 	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1315 	if (retval != 0)
1316 		return -EINVAL;
1317 	return 0;
1318 }
1319 
1320 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1321 {
1322 	int	retval;
1323 	u16	status;
1324 
1325 	/* should look and act halted */
1326 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1327 	if (retval < 0) {
1328 		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1329 				ep, retval);
1330 		return retval;
1331 	}
1332 	le16_to_cpus(&status);
1333 	if (status != 1) {
1334 		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1335 		return -EINVAL;
1336 	}
1337 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1338 	if (retval != -EPIPE)
1339 		return -EINVAL;
1340 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1341 	if (retval != -EPIPE)
1342 		return -EINVAL;
1343 	return 0;
1344 }
1345 
1346 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1347 {
1348 	int	retval;
1349 
1350 	/* shouldn't look or act halted now */
1351 	retval = verify_not_halted(tdev, ep, urb);
1352 	if (retval < 0)
1353 		return retval;
1354 
1355 	/* set halt (protocol test only), verify it worked */
1356 	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1357 			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1358 			USB_ENDPOINT_HALT, ep,
1359 			NULL, 0, USB_CTRL_SET_TIMEOUT);
1360 	if (retval < 0) {
1361 		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1362 		return retval;
1363 	}
1364 	retval = verify_halted(tdev, ep, urb);
1365 	if (retval < 0)
1366 		return retval;
1367 
1368 	/* clear halt (tests API + protocol), verify it worked */
1369 	retval = usb_clear_halt(urb->dev, urb->pipe);
1370 	if (retval < 0) {
1371 		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1372 		return retval;
1373 	}
1374 	retval = verify_not_halted(tdev, ep, urb);
1375 	if (retval < 0)
1376 		return retval;
1377 
1378 	/* NOTE:  could also verify SET_INTERFACE clear halts ... */
1379 
1380 	return 0;
1381 }
1382 
1383 static int halt_simple(struct usbtest_dev *dev)
1384 {
1385 	int		ep;
1386 	int		retval = 0;
1387 	struct urb	*urb;
1388 
1389 	urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
1390 	if (urb == NULL)
1391 		return -ENOMEM;
1392 
1393 	if (dev->in_pipe) {
1394 		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1395 		urb->pipe = dev->in_pipe;
1396 		retval = test_halt(dev, ep, urb);
1397 		if (retval < 0)
1398 			goto done;
1399 	}
1400 
1401 	if (dev->out_pipe) {
1402 		ep = usb_pipeendpoint(dev->out_pipe);
1403 		urb->pipe = dev->out_pipe;
1404 		retval = test_halt(dev, ep, urb);
1405 	}
1406 done:
1407 	simple_free_urb(urb);
1408 	return retval;
1409 }
1410 
1411 /*-------------------------------------------------------------------------*/
1412 
1413 /* Control OUT tests use the vendor control requests from Intel's
1414  * USB 2.0 compliance test device:  write a buffer, read it back.
1415  *
1416  * Intel's spec only _requires_ that it work for one packet, which
1417  * is pretty weak.   Some HCDs place limits here; most devices will
1418  * need to be able to handle more than one OUT data packet.  We'll
1419  * try whatever we're told to try.
1420  */
1421 static int ctrl_out(struct usbtest_dev *dev,
1422 		unsigned count, unsigned length, unsigned vary, unsigned offset)
1423 {
1424 	unsigned		i, j, len;
1425 	int			retval;
1426 	u8			*buf;
1427 	char			*what = "?";
1428 	struct usb_device	*udev;
1429 
1430 	if (length < 1 || length > 0xffff || vary >= length)
1431 		return -EINVAL;
1432 
1433 	buf = kmalloc(length + offset, GFP_KERNEL);
1434 	if (!buf)
1435 		return -ENOMEM;
1436 
1437 	buf += offset;
1438 	udev = testdev_to_usbdev(dev);
1439 	len = length;
1440 	retval = 0;
1441 
1442 	/* NOTE:  hardware might well act differently if we pushed it
1443 	 * with lots of back-to-back queued requests.
1444 	 */
1445 	for (i = 0; i < count; i++) {
1446 		/* write patterned data */
1447 		for (j = 0; j < len; j++)
1448 			buf[j] = i + j;
1449 		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1450 				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1451 				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1452 		if (retval != len) {
1453 			what = "write";
1454 			if (retval >= 0) {
1455 				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1456 						retval, len);
1457 				retval = -EBADMSG;
1458 			}
1459 			break;
1460 		}
1461 
1462 		/* read it back -- assuming nothing intervened!!  */
1463 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1464 				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1465 				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1466 		if (retval != len) {
1467 			what = "read";
1468 			if (retval >= 0) {
1469 				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1470 						retval, len);
1471 				retval = -EBADMSG;
1472 			}
1473 			break;
1474 		}
1475 
1476 		/* fail if we can't verify */
1477 		for (j = 0; j < len; j++) {
1478 			if (buf[j] != (u8) (i + j)) {
1479 				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1480 					j, buf[j], (u8) i + j);
1481 				retval = -EBADMSG;
1482 				break;
1483 			}
1484 		}
1485 		if (retval < 0) {
1486 			what = "verify";
1487 			break;
1488 		}
1489 
1490 		len += vary;
1491 
1492 		/* [real world] the "zero bytes IN" case isn't really used.
1493 		 * hardware can easily trip up in this weird case, since its
1494 		 * status stage is IN, not OUT like other ep0in transfers.
1495 		 */
1496 		if (len > length)
1497 			len = realworld ? 1 : 0;
1498 	}
1499 
1500 	if (retval < 0)
1501 		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1502 			what, retval, i);
1503 
1504 	kfree(buf - offset);
1505 	return retval;
1506 }
1507 
1508 /*-------------------------------------------------------------------------*/
1509 
1510 /* ISO tests ... mimic common usage
1511  *  - buffer length is split into N packets (mostly maxpacket sized)
1512  *  - multi-buffers according to sglen
1513  */
1514 
1515 struct iso_context {
1516 	unsigned		count;
1517 	unsigned		pending;
1518 	spinlock_t		lock;
1519 	struct completion	done;
1520 	int			submit_error;
1521 	unsigned long		errors;
1522 	unsigned long		packet_count;
1523 	struct usbtest_dev	*dev;
1524 };
1525 
1526 static void iso_callback(struct urb *urb)
1527 {
1528 	struct iso_context	*ctx = urb->context;
1529 
1530 	spin_lock(&ctx->lock);
1531 	ctx->count--;
1532 
1533 	ctx->packet_count += urb->number_of_packets;
1534 	if (urb->error_count > 0)
1535 		ctx->errors += urb->error_count;
1536 	else if (urb->status != 0)
1537 		ctx->errors += urb->number_of_packets;
1538 	else if (urb->actual_length != urb->transfer_buffer_length)
1539 		ctx->errors++;
1540 	else if (check_guard_bytes(ctx->dev, urb) != 0)
1541 		ctx->errors++;
1542 
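	/* Resubmit as long as at least as many completions are still needed
	 * (count) as there are urbs in flight (pending); the last 'pending'
	 * completions let each urb retire so the test can finish.
	 */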
1543 	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1544 			&& !ctx->submit_error) {
1545 		int status = usb_submit_urb(urb, GFP_ATOMIC);
1546 		switch (status) {
1547 		case 0:
1548 			goto done;
1549 		default:
1550 			dev_err(&ctx->dev->intf->dev,
1551 					"iso resubmit err %d\n",
1552 					status);
1553 			/* FALLTHROUGH */
1554 		case -ENODEV:			/* disconnected */
1555 		case -ESHUTDOWN:		/* endpoint disabled */
1556 			ctx->submit_error = 1;
1557 			break;
1558 		}
1559 	}
1560 
1561 	ctx->pending--;
1562 	if (ctx->pending == 0) {
1563 		if (ctx->errors)
1564 			dev_err(&ctx->dev->intf->dev,
1565 				"iso test, %lu errors out of %lu\n",
1566 				ctx->errors, ctx->packet_count);
1567 		complete(&ctx->done);
1568 	}
1569 done:
1570 	spin_unlock(&ctx->lock);
1571 }
1572 
1573 static struct urb *iso_alloc_urb(
1574 	struct usb_device	*udev,
1575 	int			pipe,
1576 	struct usb_endpoint_descriptor	*desc,
1577 	long			bytes,
1578 	unsigned offset
1579 )
1580 {
1581 	struct urb		*urb;
1582 	unsigned		i, maxp, packets;
1583 
1584 	if (bytes < 0 || !desc)
1585 		return NULL;
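	/* bits 10:0 of wMaxPacketSize give the base packet size; for high
	 * speed high bandwidth endpoints, bits 12:11 add up to two extra
	 * transactions per microframe.
	 */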
1586 	maxp = 0x7ff & le16_to_cpu(desc->wMaxPacketSize);
1587 	maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
1588 	packets = DIV_ROUND_UP(bytes, maxp);
1589 
1590 	urb = usb_alloc_urb(packets, GFP_KERNEL);
1591 	if (!urb)
1592 		return urb;
1593 	urb->dev = udev;
1594 	urb->pipe = pipe;
1595 
1596 	urb->number_of_packets = packets;
1597 	urb->transfer_buffer_length = bytes;
1598 	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1599 							GFP_KERNEL,
1600 							&urb->transfer_dma);
1601 	if (!urb->transfer_buffer) {
1602 		usb_free_urb(urb);
1603 		return NULL;
1604 	}
1605 	if (offset) {
1606 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
1607 		urb->transfer_buffer += offset;
1608 		urb->transfer_dma += offset;
1609 	}
1610 	/* For inbound transfers use guard byte so that test fails if
1611 		data not correctly copied */
1612 	memset(urb->transfer_buffer,
1613 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1614 			bytes);
1615 
1616 	for (i = 0; i < packets; i++) {
1617 		/* here, only the last packet will be short */
1618 		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1619 		bytes -= urb->iso_frame_desc[i].length;
1620 
1621 		urb->iso_frame_desc[i].offset = maxp * i;
1622 	}
1623 
1624 	urb->complete = iso_callback;
1625 	/* urb->context = SET BY CALLER */
1626 	urb->interval = 1 << (desc->bInterval - 1);
1627 	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1628 	return urb;
1629 }
1630 
1631 static int
1632 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1633 		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1634 {
1635 	struct iso_context	context;
1636 	struct usb_device	*udev;
1637 	unsigned		i;
1638 	unsigned long		packets = 0;
1639 	int			status = 0;
1640 	struct urb		*urbs[10];	/* FIXME no limit */
1641 
1642 	if (param->sglen > 10)
1643 		return -EDOM;
1644 
1645 	memset(&context, 0, sizeof context);
1646 	context.count = param->iterations * param->sglen;
1647 	context.dev = dev;
1648 	init_completion(&context.done);
1649 	spin_lock_init(&context.lock);
1650 
1651 	memset(urbs, 0, sizeof urbs);
1652 	udev = testdev_to_usbdev(dev);
1653 	dev_info(&dev->intf->dev,
1654 		"... iso period %d %sframes, wMaxPacket %04x\n",
1655 		1 << (desc->bInterval - 1),
1656 		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1657 		le16_to_cpu(desc->wMaxPacketSize));
1658 
1659 	for (i = 0; i < param->sglen; i++) {
1660 		urbs[i] = iso_alloc_urb(udev, pipe, desc,
1661 					param->length, offset);
1662 		if (!urbs[i]) {
1663 			status = -ENOMEM;
1664 			goto fail;
1665 		}
1666 		packets += urbs[i]->number_of_packets;
1667 		urbs[i]->context = &context;
1668 	}
1669 	packets *= param->iterations;
1670 	dev_info(&dev->intf->dev,
1671 		"... total %lu msec (%lu packets)\n",
1672 		(packets * (1 << (desc->bInterval - 1)))
1673 			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1674 		packets);
1675 
1676 	spin_lock_irq(&context.lock);
1677 	for (i = 0; i < param->sglen; i++) {
1678 		++context.pending;
1679 		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1680 		if (status < 0) {
1681 			ERROR(dev, "submit iso[%d], error %d\n", i, status);
1682 			if (i == 0) {
1683 				spin_unlock_irq(&context.lock);
1684 				goto fail;
1685 			}
1686 
1687 			simple_free_urb(urbs[i]);
1688 			urbs[i] = NULL;
1689 			context.pending--;
1690 			context.submit_error = 1;
1691 			break;
1692 		}
1693 	}
1694 	spin_unlock_irq(&context.lock);
1695 
1696 	wait_for_completion(&context.done);
1697 
1698 	for (i = 0; i < param->sglen; i++) {
1699 		if (urbs[i])
1700 			simple_free_urb(urbs[i]);
1701 	}
1702 	/*
1703 	 * Isochronous transfers are expected to fail sometimes.  As an
1704 	 * arbitrary limit, we will report an error if any submissions
1705 	 * fail or if the transfer failure rate is > 10%.
1706 	 */
1707 	if (status != 0)
1708 		;
1709 	else if (context.submit_error)
1710 		status = -EACCES;
1711 	else if (context.errors > context.packet_count / 10)
1712 		status = -EIO;
1713 	return status;
1714 
1715 fail:
1716 	for (i = 0; i < param->sglen; i++) {
1717 		if (urbs[i])
1718 			simple_free_urb(urbs[i]);
1719 	}
1720 	return status;
1721 }
1722 
1723 static int test_unaligned_bulk(
1724 	struct usbtest_dev *tdev,
1725 	int pipe,
1726 	unsigned length,
1727 	int iterations,
1728 	unsigned transfer_flags,
1729 	const char *label)
1730 {
1731 	int retval;
1732 	struct urb *urb = usbtest_alloc_urb(
1733 		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1734 
1735 	if (!urb)
1736 		return -ENOMEM;
1737 
1738 	retval = simple_io(tdev, urb, iterations, 0, 0, label);
1739 	simple_free_urb(urb);
1740 	return retval;
1741 }
1742 
1743 /*-------------------------------------------------------------------------*/
1744 
1745 /* We only have this one interface to user space, through usbfs.
1746  * User mode code can scan usbfs to find N different devices (maybe on
1747  * different busses) to use when testing, and allocate one thread per
1748  * test.  So discovery is simplified, and we have no device naming issues.
1749  *
1750  * Don't use these only as stress/load tests.  Use them along with
1751  * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
1752  * video capture, and so on.  Run different tests at different times, in
1753  * different sequences.  Nothing here should interact with other devices,
1754  * except indirectly by consuming USB bandwidth and CPU resources for test
1755  * threads and request completion.  But the only way to know that for sure
1756  * is to test when HC queues are in use by many devices.
1757  *
1758  * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
1759  * it locks out usbcore in certain code paths.  Notably, if you disconnect
1760  * the device-under-test, khubd will block forever waiting for the
1761  * ioctl to complete ... so that usb_disconnect() can abort the pending
1762  * urbs and then call usbtest_disconnect().  To abort a test, you're best
1763  * off just killing the userspace task and waiting for it to exit.
1764  */
1765 
1766 /* No BKL needed */
1767 static int
1768 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1769 {
1770 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
1771 	struct usb_device	*udev = testdev_to_usbdev(dev);
1772 	struct usbtest_param	*param = buf;
1773 	int			retval = -EOPNOTSUPP;
1774 	struct urb		*urb;
1775 	struct scatterlist	*sg;
1776 	struct usb_sg_request	req;
1777 	struct timeval		start;
1778 	unsigned		i;
1779 
1780 	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1781 
1782 	pattern = mod_pattern;
1783 
1784 	if (code != USBTEST_REQUEST)
1785 		return -EOPNOTSUPP;
1786 
1787 	if (param->iterations <= 0)
1788 		return -EINVAL;
1789 
1790 	if (mutex_lock_interruptible(&dev->lock))
1791 		return -ERESTARTSYS;
1792 
1793 	/* FIXME: What if a system sleep starts while a test is running? */
1794 
1795 	/* some devices, like ez-usb default devices, need a non-default
1796 	 * altsetting to have any active endpoints.  some tests change
1797 	 * altsettings; force a default so most tests don't need to check.
1798 	 */
1799 	if (dev->info->alt >= 0) {
1800 		int	res;
1801 
1802 		if (intf->altsetting->desc.bInterfaceNumber) {
1803 			mutex_unlock(&dev->lock);
1804 			return -ENODEV;
1805 		}
1806 		res = set_altsetting(dev, dev->info->alt);
1807 		if (res) {
1808 			dev_err(&intf->dev,
1809 					"set altsetting to %d failed, %d\n",
1810 					dev->info->alt, res);
1811 			mutex_unlock(&dev->lock);
1812 			return res;
1813 		}
1814 	}
1815 
1816 	/*
1817 	 * Just a bunch of test cases that every HCD is expected to handle.
1818 	 *
1819 	 * Some may need specific firmware, though it'd be good to have
1820 	 * one firmware image to handle all the test cases.
1821 	 *
1822 	 * FIXME add more tests!  cancel requests, verify the data, control
1823 	 * queueing, concurrent read+write threads, and so on.
1824 	 */
1825 	do_gettimeofday(&start);
1826 	switch (param->test_num) {
1827 
1828 	case 0:
1829 		dev_info(&intf->dev, "TEST 0:  NOP\n");
1830 		retval = 0;
1831 		break;
1832 
1833 	/* Simple non-queued bulk I/O tests */
1834 	case 1:
1835 		if (dev->out_pipe == 0)
1836 			break;
1837 		dev_info(&intf->dev,
1838 				"TEST 1:  write %d bytes %u times\n",
1839 				param->length, param->iterations);
1840 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1841 		if (!urb) {
1842 			retval = -ENOMEM;
1843 			break;
1844 		}
1845 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1846 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1847 		simple_free_urb(urb);
1848 		break;
1849 	case 2:
1850 		if (dev->in_pipe == 0)
1851 			break;
1852 		dev_info(&intf->dev,
1853 				"TEST 2:  read %d bytes %u times\n",
1854 				param->length, param->iterations);
1855 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1856 		if (!urb) {
1857 			retval = -ENOMEM;
1858 			break;
1859 		}
1860 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1861 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1862 		simple_free_urb(urb);
1863 		break;
1864 	case 3:
1865 		if (dev->out_pipe == 0 || param->vary == 0)
1866 			break;
1867 		dev_info(&intf->dev,
1868 				"TEST 3:  write/%d 0..%d bytes %u times\n",
1869 				param->vary, param->length, param->iterations);
1870 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1871 		if (!urb) {
1872 			retval = -ENOMEM;
1873 			break;
1874 		}
1875 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1876 		retval = simple_io(dev, urb, param->iterations, param->vary,
1877 					0, "test3");
1878 		simple_free_urb(urb);
1879 		break;
1880 	case 4:
1881 		if (dev->in_pipe == 0 || param->vary == 0)
1882 			break;
1883 		dev_info(&intf->dev,
1884 				"TEST 4:  read/%d 0..%d bytes %u times\n",
1885 				param->vary, param->length, param->iterations);
1886 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1887 		if (!urb) {
1888 			retval = -ENOMEM;
1889 			break;
1890 		}
1891 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1892 		retval = simple_io(dev, urb, param->iterations, param->vary,
1893 					0, "test4");
1894 		simple_free_urb(urb);
1895 		break;
1896 
1897 	/* Queued bulk I/O tests */
1898 	case 5:
1899 		if (dev->out_pipe == 0 || param->sglen == 0)
1900 			break;
1901 		dev_info(&intf->dev,
1902 			"TEST 5:  write %d sglists %d entries of %d bytes\n",
1903 				param->iterations,
1904 				param->sglen, param->length);
1905 		sg = alloc_sglist(param->sglen, param->length, 0);
1906 		if (!sg) {
1907 			retval = -ENOMEM;
1908 			break;
1909 		}
1910 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1911 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1912 				&req, sg, param->sglen);
1913 		free_sglist(sg, param->sglen);
1914 		break;
1915 
1916 	case 6:
1917 		if (dev->in_pipe == 0 || param->sglen == 0)
1918 			break;
1919 		dev_info(&intf->dev,
1920 			"TEST 6:  read %d sglists %d entries of %d bytes\n",
1921 				param->iterations,
1922 				param->sglen, param->length);
1923 		sg = alloc_sglist(param->sglen, param->length, 0);
1924 		if (!sg) {
1925 			retval = -ENOMEM;
1926 			break;
1927 		}
1928 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1929 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1930 				&req, sg, param->sglen);
1931 		free_sglist(sg, param->sglen);
1932 		break;
1933 	case 7:
1934 		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
1935 			break;
1936 		dev_info(&intf->dev,
1937 			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
1938 				param->vary, param->iterations,
1939 				param->sglen, param->length);
1940 		sg = alloc_sglist(param->sglen, param->length, param->vary);
1941 		if (!sg) {
1942 			retval = -ENOMEM;
1943 			break;
1944 		}
1945 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1946 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1947 				&req, sg, param->sglen);
1948 		free_sglist(sg, param->sglen);
1949 		break;
1950 	case 8:
1951 		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
1952 			break;
1953 		dev_info(&intf->dev,
1954 			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
1955 				param->vary, param->iterations,
1956 				param->sglen, param->length);
1957 		sg = alloc_sglist(param->sglen, param->length, param->vary);
1958 		if (!sg) {
1959 			retval = -ENOMEM;
1960 			break;
1961 		}
1962 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1963 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1964 				&req, sg, param->sglen);
1965 		free_sglist(sg, param->sglen);
1966 		break;
1967 
1968 	/* non-queued sanity tests for control (chapter 9 subset) */
1969 	case 9:
1970 		retval = 0;
1971 		dev_info(&intf->dev,
1972 			"TEST 9:  ch9 (subset) control tests, %d times\n",
1973 				param->iterations);
1974 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
1975 			retval = ch9_postconfig(dev);
1976 		if (retval)
1977 			dev_err(&intf->dev, "ch9 subset failed, "
1978 					"iterations left %d\n", i);
1979 		break;
1980 
1981 	/* queued control messaging */
1982 	case 10:
1983 		if (param->sglen == 0)
1984 			break;
1985 		retval = 0;
1986 		dev_info(&intf->dev,
1987 				"TEST 10:  queue %d control calls, %d times\n",
1988 				param->sglen,
1989 				param->iterations);
1990 		retval = test_ctrl_queue(dev, param);
1991 		break;
1992 
1993 	/* simple non-queued unlinks (ring with one urb) */
1994 	case 11:
1995 		if (dev->in_pipe == 0 || !param->length)
1996 			break;
1997 		retval = 0;
1998 		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
1999 				param->iterations, param->length);
2000 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2001 			retval = unlink_simple(dev, dev->in_pipe,
2002 						param->length);
2003 		if (retval)
2004 			dev_err(&intf->dev, "unlink reads failed %d, "
2005 				"iterations left %d\n", retval, i);
2006 		break;
2007 	case 12:
2008 		if (dev->out_pipe == 0 || !param->length)
2009 			break;
2010 		retval = 0;
2011 		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
2012 				param->iterations, param->length);
2013 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2014 			retval = unlink_simple(dev, dev->out_pipe,
2015 						param->length);
2016 		if (retval)
2017 			dev_err(&intf->dev, "unlink writes failed %d, "
2018 				"iterations left %d\n", retval, i);
2019 		break;
2020 
2021 	/* ep halt tests */
2022 	case 13:
2023 		if (dev->out_pipe == 0 && dev->in_pipe == 0)
2024 			break;
2025 		retval = 0;
2026 		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
2027 				param->iterations);
2028 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2029 			retval = halt_simple(dev);
2030 
2031 		if (retval)
2032 			ERROR(dev, "halts failed, iterations left %d\n", i);
2033 		break;
2034 
2035 	/* control write tests */
2036 	case 14:
2037 		if (!dev->info->ctrl_out)
2038 			break;
2039 		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
2040 				param->iterations,
2041 				realworld ? 1 : 0, param->length,
2042 				param->vary);
2043 		retval = ctrl_out(dev, param->iterations,
2044 				param->length, param->vary, 0);
2045 		break;
2046 
2047 	/* iso write tests */
2048 	case 15:
2049 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2050 			break;
2051 		dev_info(&intf->dev,
2052 			"TEST 15:  write %d iso, %d entries of %d bytes\n",
2053 				param->iterations,
2054 				param->sglen, param->length);
2055 		/* FIRMWARE:  iso sink */
2056 		retval = test_iso_queue(dev, param,
2057 				dev->out_iso_pipe, dev->iso_out, 0);
2058 		break;
2059 
2060 	/* iso read tests */
2061 	case 16:
2062 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2063 			break;
2064 		dev_info(&intf->dev,
2065 			"TEST 16:  read %d iso, %d entries of %d bytes\n",
2066 				param->iterations,
2067 				param->sglen, param->length);
2068 		/* FIRMWARE:  iso source */
2069 		retval = test_iso_queue(dev, param,
2070 				dev->in_iso_pipe, dev->iso_in, 0);
2071 		break;
2072 
2073 	/* FIXME scatterlist cancel (needs helper thread) */
2074 
2075 	/* Tests for bulk I/O using DMA mapping by core and odd address */
2076 	case 17:
2077 		if (dev->out_pipe == 0)
2078 			break;
2079 		dev_info(&intf->dev,
2080 			"TEST 17:  write odd addr %d bytes %u times core map\n",
2081 			param->length, param->iterations);
2082 
2083 		retval = test_unaligned_bulk(
2084 				dev, dev->out_pipe,
2085 				param->length, param->iterations,
2086 				0, "test17");
2087 		break;
2088 
2089 	case 18:
2090 		if (dev->in_pipe == 0)
2091 			break;
2092 		dev_info(&intf->dev,
2093 			"TEST 18:  read odd addr %d bytes %u times core map\n",
2094 			param->length, param->iterations);
2095 
2096 		retval = test_unaligned_bulk(
2097 				dev, dev->in_pipe,
2098 				param->length, param->iterations,
2099 				0, "test18");
2100 		break;
2101 
2102 	/* Tests for bulk I/O using premapped coherent buffer and odd address */
2103 	case 19:
2104 		if (dev->out_pipe == 0)
2105 			break;
2106 		dev_info(&intf->dev,
2107 			"TEST 19:  write odd addr %d bytes %u times premapped\n",
2108 			param->length, param->iterations);
2109 
2110 		retval = test_unaligned_bulk(
2111 				dev, dev->out_pipe,
2112 				param->length, param->iterations,
2113 				URB_NO_TRANSFER_DMA_MAP, "test19");
2114 		break;
2115 
2116 	case 20:
2117 		if (dev->in_pipe == 0)
2118 			break;
2119 		dev_info(&intf->dev,
2120 			"TEST 20:  read odd addr %d bytes %u times premapped\n",
2121 			param->length, param->iterations);
2122 
2123 		retval = test_unaligned_bulk(
2124 				dev, dev->in_pipe,
2125 				param->length, param->iterations,
2126 				URB_NO_TRANSFER_DMA_MAP, "test20");
2127 		break;
2128 
2129 	/* control write tests with unaligned buffer */
2130 	case 21:
2131 		if (!dev->info->ctrl_out)
2132 			break;
2133 		dev_info(&intf->dev,
2134 				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
2135 				param->iterations,
2136 				realworld ? 1 : 0, param->length,
2137 				param->vary);
2138 		retval = ctrl_out(dev, param->iterations,
2139 				param->length, param->vary, 1);
2140 		break;
2141 
2142 	/* unaligned iso tests */
2143 	case 22:
2144 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2145 			break;
2146 		dev_info(&intf->dev,
2147 			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
2148 				param->iterations,
2149 				param->sglen, param->length);
2150 		retval = test_iso_queue(dev, param,
2151 				dev->out_iso_pipe, dev->iso_out, 1);
2152 		break;
2153 
2154 	case 23:
2155 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2156 			break;
2157 		dev_info(&intf->dev,
2158 			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
2159 				param->iterations,
2160 				param->sglen, param->length);
2161 		retval = test_iso_queue(dev, param,
2162 				dev->in_iso_pipe, dev->iso_in, 1);
2163 		break;
2164 
2165 	/* unlink URBs from a bulk-OUT queue */
2166 	case 24:
2167 		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2168 			break;
2169 		retval = 0;
2170 		dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
2171 				"%d %d-byte writes\n",
2172 				param->iterations, param->sglen, param->length);
2173 		for (i = param->iterations; retval == 0 && i > 0; --i) {
2174 			retval = unlink_queued(dev, dev->out_pipe,
2175 						param->sglen, param->length);
2176 			if (retval) {
2177 				dev_err(&intf->dev,
2178 					"unlink queued writes failed %d, "
2179 					"iterations left %d\n", retval, i);
2180 				break;
2181 			}
2182 		}
2183 		break;
2184 
2185 	}
2186 	do_gettimeofday(&param->duration);
2187 	param->duration.tv_sec -= start.tv_sec;
2188 	param->duration.tv_usec -= start.tv_usec;
2189 	if (param->duration.tv_usec < 0) {
2190 		param->duration.tv_usec += 1000 * 1000;
2191 		param->duration.tv_sec -= 1;
2192 	}
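	/*
	 * Worked example of the borrow above (values illustrative):  a run
	 * from 5.900000 s to 7.100000 s first gives 2 s and -800000 us,
	 * which normalizes to 1 s and 200000 us, i.e. 1.2 seconds elapsed.
	 */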
2193 	mutex_unlock(&dev->lock);
2194 	return retval;
2195 }
2196 
2197 /*-------------------------------------------------------------------------*/
2198 
2199 static unsigned force_interrupt;
2200 module_param(force_interrupt, uint, 0);
2201 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2202 
2203 #ifdef	GENERIC
2204 static unsigned short vendor;
2205 module_param(vendor, ushort, 0);
2206 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2207 
2208 static unsigned short product;
2209 module_param(product, ushort, 0);
2210 MODULE_PARM_DESC(product, "product code (from vendor)");
2211 #endif
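
/*
 * Example (IDs here are hypothetical):  to claim an otherwise unmatched
 * device for the default control tests via the GENERIC table entry below,
 * bind by module parameter:
 *
 *	modprobe usbtest vendor=0x1234 product=0x5678
 *
 * The vendor code is required for this path; the product code is optional.
 */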
2212 
2213 static int
2214 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2215 {
2216 	struct usb_device	*udev;
2217 	struct usbtest_dev	*dev;
2218 	struct usbtest_info	*info;
2219 	char			*rtest, *wtest;
2220 	char			*irtest, *iwtest;
2221 
2222 	udev = interface_to_usbdev(intf);
2223 
2224 #ifdef	GENERIC
2225 	/* specify devices by module parameters? */
2226 	if (id->match_flags == 0) {
2227 		/* vendor match required, product match optional */
2228 		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2229 			return -ENODEV;
2230 		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2231 			return -ENODEV;
2232 		dev_info(&intf->dev, "matched module params, "
2233 					"vend=0x%04x prod=0x%04x\n",
2234 				le16_to_cpu(udev->descriptor.idVendor),
2235 				le16_to_cpu(udev->descriptor.idProduct));
2236 	}
2237 #endif
2238 
2239 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2240 	if (!dev)
2241 		return -ENOMEM;
2242 	info = (struct usbtest_info *) id->driver_info;
2243 	dev->info = info;
2244 	mutex_init(&dev->lock);
2245 
2246 	dev->intf = intf;
2247 
2248 	/* cacheline-aligned scratch for i/o */
2249 	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2250 	if (dev->buf == NULL) {
2251 		kfree(dev);
2252 		return -ENOMEM;
2253 	}
2254 
2255 	/* NOTE this doesn't yet test the handful of differences that are
2256 	 * visible with high speed interrupts:  bigger maxpacket (1K) and
2257 	 * "high bandwidth" modes (up to 3 packets/uframe).
2258 	 */
2259 	rtest = wtest = "";
2260 	irtest = iwtest = "";
2261 	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2262 		if (info->ep_in) {
2263 			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2264 			rtest = " intr-in";
2265 		}
2266 		if (info->ep_out) {
2267 			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2268 			wtest = " intr-out";
2269 		}
2270 	} else {
2271 		if (info->autoconf) {
2272 			int status;
2273 
2274 			status = get_endpoints(dev, intf);
2275 			if (status < 0) {
2276 				WARNING(dev, "couldn't get endpoints, %d\n",
2277 						status);
				/* probe is failing, so disconnect() won't free these */
				kfree(dev->buf);
				kfree(dev);
2278 				return status;
2279 			}
2280 			/* may find bulk or ISO pipes */
2281 		} else {
2282 			if (info->ep_in)
2283 				dev->in_pipe = usb_rcvbulkpipe(udev,
2284 							info->ep_in);
2285 			if (info->ep_out)
2286 				dev->out_pipe = usb_sndbulkpipe(udev,
2287 							info->ep_out);
2288 		}
2289 		if (dev->in_pipe)
2290 			rtest = " bulk-in";
2291 		if (dev->out_pipe)
2292 			wtest = " bulk-out";
2293 		if (dev->in_iso_pipe)
2294 			irtest = " iso-in";
2295 		if (dev->out_iso_pipe)
2296 			iwtest = " iso-out";
2297 	}
2298 
2299 	usb_set_intfdata(intf, dev);
2300 	dev_info(&intf->dev, "%s\n", info->name);
2301 	dev_info(&intf->dev, "%s speed {control%s%s%s%s%s} tests%s\n",
2302 			({ char *tmp;
2303 			switch (udev->speed) {
2304 			case USB_SPEED_LOW:
2305 				tmp = "low";
2306 				break;
2307 			case USB_SPEED_FULL:
2308 				tmp = "full";
2309 				break;
2310 			case USB_SPEED_HIGH:
2311 				tmp = "high";
2312 				break;
2313 			case USB_SPEED_SUPER:
2314 				tmp = "super";
2315 				break;
2316 			default:
2317 				tmp = "unknown";
2318 				break;
2319 			}; tmp; }),
2320 			info->ctrl_out ? " in/out" : "",
2321 			rtest, wtest,
2322 			irtest, iwtest,
2323 			info->alt >= 0 ? " (+alt)" : "");
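	/*
	 * For instance, a gadget zero device enumerating at high speed makes
	 * the line above come out roughly as:
	 *	"high speed {control in/out bulk-in bulk-out} tests (+alt)"
	 */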
2324 	return 0;
2325 }
2326 
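/*
 * No device state needs saving or restoring across system sleep; these
 * (empty) handlers are presumably here just so usbcore sees suspend and
 * resume as supported and need not unbind the test interface around a
 * sleep transition.
 */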
2327 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2328 {
2329 	return 0;
2330 }
2331 
2332 static int usbtest_resume(struct usb_interface *intf)
2333 {
2334 	return 0;
2335 }
2336 
2337 
2338 static void usbtest_disconnect(struct usb_interface *intf)
2339 {
2340 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2341 
2342 	usb_set_intfdata(intf, NULL);
2343 	dev_dbg(&intf->dev, "disconnect\n");
2344 	kfree(dev);
2345 }
2346 
2347 /* Basic testing only needs a device that can source or sink bulk traffic.
2348  * Any device can test control transfers (default with GENERIC binding).
2349  *
2350  * Several entries work with the default EP0 implementation that's built
2351  * into EZ-USB chips.  There's a default vendor ID which can be overridden
2352  * by (very) small config EEPROMS, but otherwise all these devices act
2353  * identically until firmware is loaded:  only EP0 works.  It turns out
2354  * to be easy to make other endpoints work, without modifying that EP0
2355  * behavior.  For now, we expect that kind of firmware.
2356  */
2357 
2358 /* an21xx or fx versions of ez-usb */
2359 static struct usbtest_info ez1_info = {
2360 	.name		= "EZ-USB device",
2361 	.ep_in		= 2,
2362 	.ep_out		= 2,
2363 	.alt		= 1,
2364 };
2365 
2366 /* fx2 version of ez-usb */
2367 static struct usbtest_info ez2_info = {
2368 	.name		= "FX2 device",
2369 	.ep_in		= 6,
2370 	.ep_out		= 2,
2371 	.alt		= 1,
2372 };
2373 
2374 /* ezusb family device with dedicated usb test firmware
2375  */
2376 static struct usbtest_info fw_info = {
2377 	.name		= "usb test device",
2378 	.ep_in		= 2,
2379 	.ep_out		= 2,
2380 	.alt		= 1,
2381 	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
2382 	.ctrl_out	= 1,
2383 	.iso		= 1,		/* iso_ep's are #8 in/out */
2384 };
2385 
2386 /* peripheral running Linux and 'zero.c' test firmware, or
2387  * its user-mode cousin.  Different versions of this use
2388  * different hardware with the same vendor/product codes,
2389  * so the host side MUST rely on the endpoint descriptors.
2390  */
2391 static struct usbtest_info gz_info = {
2392 	.name		= "Linux gadget zero",
2393 	.autoconf	= 1,
2394 	.ctrl_out	= 1,
2395 	.alt		= 0,
2396 };
2397 
2398 static struct usbtest_info um_info = {
2399 	.name		= "Linux user mode test driver",
2400 	.autoconf	= 1,
2401 	.alt		= -1,
2402 };
2403 
2404 static struct usbtest_info um2_info = {
2405 	.name		= "Linux user mode ISO test driver",
2406 	.autoconf	= 1,
2407 	.iso		= 1,
2408 	.alt		= -1,
2409 };
2410 
2411 #ifdef IBOT2
2412 /* this is a nice source of high speed bulk data;
2413  * uses an FX2, with firmware provided in the device
2414  */
2415 static struct usbtest_info ibot2_info = {
2416 	.name		= "iBOT2 webcam",
2417 	.ep_in		= 2,
2418 	.alt		= -1,
2419 };
2420 #endif
2421 
2422 #ifdef GENERIC
2423 /* we can use any device to test control traffic */
2424 static struct usbtest_info generic_info = {
2425 	.name		= "Generic USB device",
2426 	.alt		= -1,
2427 };
2428 #endif
2429 
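/*
 * Hooking up another test board amounts to adding a usbtest_info like the
 * ones above plus an id_table[] entry pointing at it, along the lines of
 * (IDs and endpoint numbers are hypothetical):
 *
 *	static struct usbtest_info myboard_info = {
 *		.name	= "my test board",
 *		.ep_in	= 1,
 *		.ep_out	= 1,
 *		.alt	= -1,
 *	};
 *
 *	{ USB_DEVICE(0x1234, 0x5678),
 *		.driver_info = (unsigned long) &myboard_info, },
 */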
2430 
2431 static const struct usb_device_id id_table[] = {
2432 
2433 	/*-------------------------------------------------------------*/
2434 
2435 	/* EZ-USB devices which download firmware to replace (or in our
2436 	 * case augment) the default device implementation.
2437 	 */
2438 
2439 	/* generic EZ-USB FX controller */
2440 	{ USB_DEVICE(0x0547, 0x2235),
2441 		.driver_info = (unsigned long) &ez1_info,
2442 	},
2443 
2444 	/* CY3671 development board with EZ-USB FX */
2445 	{ USB_DEVICE(0x0547, 0x0080),
2446 		.driver_info = (unsigned long) &ez1_info,
2447 	},
2448 
2449 	/* generic EZ-USB FX2 controller (or development board) */
2450 	{ USB_DEVICE(0x04b4, 0x8613),
2451 		.driver_info = (unsigned long) &ez2_info,
2452 	},
2453 
2454 	/* re-enumerated usb test device firmware */
2455 	{ USB_DEVICE(0xfff0, 0xfff0),
2456 		.driver_info = (unsigned long) &fw_info,
2457 	},
2458 
2459 	/* "Gadget Zero" firmware runs under Linux */
2460 	{ USB_DEVICE(0x0525, 0xa4a0),
2461 		.driver_info = (unsigned long) &gz_info,
2462 	},
2463 
2464 	/* so does a user-mode variant */
2465 	{ USB_DEVICE(0x0525, 0xa4a4),
2466 		.driver_info = (unsigned long) &um_info,
2467 	},
2468 
2469 	/* ... and a user-mode variant that talks iso */
2470 	{ USB_DEVICE(0x0525, 0xa4a3),
2471 		.driver_info = (unsigned long) &um2_info,
2472 	},
2473 
2474 #ifdef KEYSPAN_19Qi
2475 	/* Keyspan 19qi uses an21xx (original EZ-USB) */
2476 	/* this does not coexist with the real Keyspan 19qi driver! */
2477 	{ USB_DEVICE(0x06cd, 0x010b),
2478 		.driver_info = (unsigned long) &ez1_info,
2479 	},
2480 #endif
2481 
2482 	/*-------------------------------------------------------------*/
2483 
2484 #ifdef IBOT2
2485 	/* iBOT2 makes a nice source of high speed bulk-in data */
2486 	/* this does not coexist with a real iBOT2 driver! */
2487 	{ USB_DEVICE(0x0b62, 0x0059),
2488 		.driver_info = (unsigned long) &ibot2_info,
2489 	},
2490 #endif
2491 
2492 	/*-------------------------------------------------------------*/
2493 
2494 #ifdef GENERIC
2495 	/* module params can specify devices to use for control tests */
2496 	{ .driver_info = (unsigned long) &generic_info, },
2497 #endif
2498 
2499 	/*-------------------------------------------------------------*/
2500 
2501 	{ }
2502 };
2503 MODULE_DEVICE_TABLE(usb, id_table);
2504 
2505 static struct usb_driver usbtest_driver = {
2506 	.name =		"usbtest",
2507 	.id_table =	id_table,
2508 	.probe =	usbtest_probe,
2509 	.unlocked_ioctl = usbtest_ioctl,
2510 	.disconnect =	usbtest_disconnect,
2511 	.suspend =	usbtest_suspend,
2512 	.resume =	usbtest_resume,
2513 };
2514 
2515 /*-------------------------------------------------------------------------*/
2516 
2517 static int __init usbtest_init(void)
2518 {
2519 #ifdef GENERIC
2520 	if (vendor)
2521 		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2522 #endif
2523 	return usb_register(&usbtest_driver);
2524 }
2525 module_init(usbtest_init);
2526 
2527 static void __exit usbtest_exit(void)
2528 {
2529 	usb_deregister(&usbtest_driver);
2530 }
2531 module_exit(usbtest_exit);
2532 
2533 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2534 MODULE_LICENSE("GPL");
2535 
2536