xref: /linux/drivers/usb/misc/usbtest.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/init.h>
5 #include <linux/slab.h>
6 #include <linux/mm.h>
7 #include <linux/module.h>
8 #include <linux/moduleparam.h>
9 #include <linux/scatterlist.h>
10 #include <linux/mutex.h>
11 #include <linux/timer.h>
12 #include <linux/usb.h>
13 
14 #define SIMPLE_IO_TIMEOUT	10000	/* in milliseconds */
15 
16 /*-------------------------------------------------------------------------*/
17 
18 static int override_alt = -1;
19 module_param_named(alt, override_alt, int, 0644);
20 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
21 static void complicated_callback(struct urb *urb);
22 
23 /*-------------------------------------------------------------------------*/
24 
25 /* FIXME make these public somewhere; usbdevfs.h? */
26 
27 /* Parameter for usbtest driver. */
28 struct usbtest_param_32 {
29 	/* inputs */
30 	__u32		test_num;	/* 0..(TEST_CASES-1) */
31 	__u32		iterations;
32 	__u32		length;
33 	__u32		vary;
34 	__u32		sglen;
35 
36 	/* outputs */
37 	__s32		duration_sec;
38 	__s32		duration_usec;
39 };
40 
41 /*
42  * Compat parameter to the usbtest driver.
43  * This supports older user space binaries compiled with a 64-bit compiler.
44  */
45 struct usbtest_param_64 {
46 	/* inputs */
47 	__u32		test_num;	/* 0..(TEST_CASES-1) */
48 	__u32		iterations;
49 	__u32		length;
50 	__u32		vary;
51 	__u32		sglen;
52 
53 	/* outputs */
54 	__s64		duration_sec;
55 	__s64		duration_usec;
56 };
57 
58 /* IOCTL interface to the driver. */
59 #define USBTEST_REQUEST_32    _IOWR('U', 100, struct usbtest_param_32)
60 /* COMPAT IOCTL interface to the driver. */
61 #define USBTEST_REQUEST_64    _IOWR('U', 100, struct usbtest_param_64)
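/*
 * Example (sketch): user space reaches this driver's ioctl through usbfs by
 * wrapping one of the requests above in a usbdevfs_ioctl (declared in
 * <linux/usbdevice_fs.h>) aimed at the interface bound to usbtest; this is
 * roughly what tools/usb/testusb.c does.  The values below are illustrative:
 *
 *	struct usbtest_param_32 param = {
 *		.test_num = 1, .iterations = 1000, .length = 1024, .sglen = 32,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno = 0,			// claimed interface number
 *		.ioctl_code = USBTEST_REQUEST_32,
 *		.data = &param,
 *	};
 *	ioctl(fd, USBDEVFS_IOCTL, &wrapper);	// fd open on /dev/bus/usb/BBB/DDD
 */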
62 
63 /*-------------------------------------------------------------------------*/
64 
65 #define	GENERIC		/* let probe() bind using module params */
66 
67 /* Some devices that can be used for testing will have "real" drivers.
68  * Entries for those need to be enabled here by hand, after disabling
69  * that "real" driver.
70  */
71 //#define	IBOT2		/* grab iBOT2 webcams */
72 //#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
73 
74 /*-------------------------------------------------------------------------*/
75 
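/* Describes what one kind of test device supports: which bulk/interrupt
 * endpoints to use (or, with 'autoconf', let probe() discover them via
 * get_endpoints() below), and whether control-OUT, isochronous, and
 * interrupt transfer tests apply.
 */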
76 struct usbtest_info {
77 	const char		*name;
78 	u8			ep_in;		/* bulk/intr source */
79 	u8			ep_out;		/* bulk/intr sink */
80 	unsigned		autoconf:1;
81 	unsigned		ctrl_out:1;
82 	unsigned		iso:1;		/* try iso in/out */
83 	unsigned		intr:1;		/* try interrupt in/out */
84 	int			alt;
85 };
86 
87 /* this is accessed only through usbfs ioctl calls.
88  * one ioctl to issue a test ... one lock per device.
89  * tests create other threads if they need them.
90  * urbs and buffers are allocated dynamically,
91  * and data generated deterministically.
92  */
93 struct usbtest_dev {
94 	struct usb_interface	*intf;
95 	struct usbtest_info	*info;
96 	int			in_pipe;
97 	int			out_pipe;
98 	int			in_iso_pipe;
99 	int			out_iso_pipe;
100 	int			in_int_pipe;
101 	int			out_int_pipe;
102 	struct usb_endpoint_descriptor	*iso_in, *iso_out;
103 	struct usb_endpoint_descriptor	*int_in, *int_out;
104 	struct mutex		lock;
105 
106 #define TBUF_SIZE	256
107 	u8			*buf;
108 };
109 
110 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
111 {
112 	return interface_to_usbdev(test->intf);
113 }
114 
115 /* set up all urbs so they can be used with either bulk or interrupt */
116 #define	INTERRUPT_RATE		1	/* msec/transfer */
117 
118 #define ERROR(tdev, fmt, args...) \
119 	dev_err(&(tdev)->intf->dev , fmt , ## args)
120 #define WARNING(tdev, fmt, args...) \
121 	dev_warn(&(tdev)->intf->dev , fmt , ## args)
122 
123 #define GUARD_BYTE	0xA5
124 #define MAX_SGLEN	128
125 
126 /*-------------------------------------------------------------------------*/
127 
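/* remember only the first IN and the first OUT endpoint seen for a given
 * transfer type while scanning an altsetting; later candidates are ignored
 */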
128 static inline void endpoint_update(int edi,
129 				   struct usb_host_endpoint **in,
130 				   struct usb_host_endpoint **out,
131 				   struct usb_host_endpoint *e)
132 {
133 	if (edi) {
134 		if (!*in)
135 			*in = e;
136 	} else {
137 		if (!*out)
138 			*out = e;
139 	}
140 }
141 
142 static int
143 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
144 {
145 	int				tmp;
146 	struct usb_host_interface	*alt;
147 	struct usb_host_endpoint	*in, *out;
148 	struct usb_host_endpoint	*iso_in, *iso_out;
149 	struct usb_host_endpoint	*int_in, *int_out;
150 	struct usb_device		*udev;
151 
152 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
153 		unsigned	ep;
154 
155 		in = out = NULL;
156 		iso_in = iso_out = NULL;
157 		int_in = int_out = NULL;
158 		alt = intf->altsetting + tmp;
159 
160 		if (override_alt >= 0 &&
161 				override_alt != alt->desc.bAlternateSetting)
162 			continue;
163 
164 		/* take the first altsetting with in-bulk + out-bulk, or with
165 		 * iso/interrupt endpoints when requested; ignore the rest.
166 		 */
167 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
168 			struct usb_host_endpoint	*e;
169 			int edi;
170 
171 			e = alt->endpoint + ep;
172 			edi = usb_endpoint_dir_in(&e->desc);
173 
174 			switch (usb_endpoint_type(&e->desc)) {
175 			case USB_ENDPOINT_XFER_BULK:
176 				endpoint_update(edi, &in, &out, e);
177 				continue;
178 			case USB_ENDPOINT_XFER_INT:
179 				if (dev->info->intr)
180 					endpoint_update(edi, &int_in, &int_out, e);
181 				continue;
182 			case USB_ENDPOINT_XFER_ISOC:
183 				if (dev->info->iso)
184 					endpoint_update(edi, &iso_in, &iso_out, e);
185 				/* FALLTHROUGH */
186 			default:
187 				continue;
188 			}
189 		}
190 		if ((in && out)  ||  iso_in || iso_out || int_in || int_out)
191 			goto found;
192 	}
193 	return -EINVAL;
194 
195 found:
196 	udev = testdev_to_usbdev(dev);
197 	dev->info->alt = alt->desc.bAlternateSetting;
198 	if (alt->desc.bAlternateSetting != 0) {
199 		tmp = usb_set_interface(udev,
200 				alt->desc.bInterfaceNumber,
201 				alt->desc.bAlternateSetting);
202 		if (tmp < 0)
203 			return tmp;
204 	}
205 
206 	if (in)
207 		dev->in_pipe = usb_rcvbulkpipe(udev,
208 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
209 	if (out)
210 		dev->out_pipe = usb_sndbulkpipe(udev,
211 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
212 
213 	if (iso_in) {
214 		dev->iso_in = &iso_in->desc;
215 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
216 				iso_in->desc.bEndpointAddress
217 					& USB_ENDPOINT_NUMBER_MASK);
218 	}
219 
220 	if (iso_out) {
221 		dev->iso_out = &iso_out->desc;
222 		dev->out_iso_pipe = usb_sndisocpipe(udev,
223 				iso_out->desc.bEndpointAddress
224 					& USB_ENDPOINT_NUMBER_MASK);
225 	}
226 
227 	if (int_in) {
228 		dev->int_in = &int_in->desc;
229 		dev->in_int_pipe = usb_rcvintpipe(udev,
230 				int_in->desc.bEndpointAddress
231 					& USB_ENDPOINT_NUMBER_MASK);
232 	}
233 
234 	if (int_out) {
235 		dev->int_out = &int_out->desc;
236 		dev->out_int_pipe = usb_sndintpipe(udev,
237 				int_out->desc.bEndpointAddress
238 					& USB_ENDPOINT_NUMBER_MASK);
239 	}
240 	return 0;
241 }
242 
243 /*-------------------------------------------------------------------------*/
244 
245 /* Support for testing basic non-queued I/O streams.
246  *
247  * These just package urbs as requests that can be easily canceled.
248  * Each urb's data buffer is dynamically allocated; callers can fill
249  * them with non-zero test data (or test for it) when appropriate.
250  */
251 
252 static void simple_callback(struct urb *urb)
253 {
254 	complete(urb->context);
255 }
256 
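/* Common urb constructor for the tests below: fills the urb for interrupt
 * transfers when bInterval is nonzero and for bulk otherwise; allocates a
 * DMA-coherent buffer when URB_NO_TRANSFER_DMA_MAP is requested, else a
 * kmalloc one; and can misalign the buffer by 'offset' bytes, padding the
 * skipped bytes with GUARD_BYTE so overwrites are detectable.  IN buffers
 * start out filled with GUARD_BYTE, OUT buffers with zeroes.
 */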
257 static struct urb *usbtest_alloc_urb(
258 	struct usb_device	*udev,
259 	int			pipe,
260 	unsigned long		bytes,
261 	unsigned		transfer_flags,
262 	unsigned		offset,
263 	u8			bInterval,
264 	usb_complete_t		complete_fn)
265 {
266 	struct urb		*urb;
267 
268 	urb = usb_alloc_urb(0, GFP_KERNEL);
269 	if (!urb)
270 		return urb;
271 
272 	if (bInterval)
273 		usb_fill_int_urb(urb, udev, pipe, NULL, bytes, complete_fn,
274 				NULL, bInterval);
275 	else
276 		usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, complete_fn,
277 				NULL);
278 
279 	urb->interval = (udev->speed == USB_SPEED_HIGH)
280 			? (INTERRUPT_RATE << 3)
281 			: INTERRUPT_RATE;
282 	urb->transfer_flags = transfer_flags;
283 	if (usb_pipein(pipe))
284 		urb->transfer_flags |= URB_SHORT_NOT_OK;
285 
286 	if ((bytes + offset) == 0)
287 		return urb;
288 
289 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
290 		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
291 			GFP_KERNEL, &urb->transfer_dma);
292 	else
293 		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
294 
295 	if (!urb->transfer_buffer) {
296 		usb_free_urb(urb);
297 		return NULL;
298 	}
299 
300 	/* To test unaligned transfers add an offset and fill the
301 	 * unused memory with a guard value */
302 	if (offset) {
303 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
304 		urb->transfer_buffer += offset;
305 		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
306 			urb->transfer_dma += offset;
307 	}
308 
309 	/* For inbound transfers use a guard byte so that the test fails
310 	 * if data is not copied correctly */
311 	memset(urb->transfer_buffer,
312 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
313 			bytes);
314 	return urb;
315 }
316 
317 static struct urb *simple_alloc_urb(
318 	struct usb_device	*udev,
319 	int			pipe,
320 	unsigned long		bytes,
321 	u8			bInterval)
322 {
323 	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
324 			bInterval, simple_callback);
325 }
326 
327 static struct urb *complicated_alloc_urb(
328 	struct usb_device	*udev,
329 	int			pipe,
330 	unsigned long		bytes,
331 	u8			bInterval)
332 {
333 	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
334 			bInterval, complicated_callback);
335 }
336 
337 static unsigned pattern;
338 static unsigned mod_pattern;
339 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
340 MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
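/* With pattern 1 ("mod63") simple_fill_buf() below writes 0, 1, ... 62,
 * 0, 1, ... within each packet, restarting at every maxpacket boundary:
 * e.g. for a 512-byte bulk packet the last eight bytes are 0..7 and the
 * next packet begins again at 0.  As noted in simple_check_buf(), this
 * stays in sync with the gadget as long as both sides agree on transfer
 * sizes; pattern 0 just sends and expects zeroes.
 */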
341 
342 static unsigned get_maxpacket(struct usb_device *udev, int pipe)
343 {
344 	struct usb_host_endpoint	*ep;
345 
346 	ep = usb_pipe_endpoint(udev, pipe);
347 	return le16_to_cpup(&ep->desc.wMaxPacketSize);
348 }
349 
350 static void simple_fill_buf(struct urb *urb)
351 {
352 	unsigned	i;
353 	u8		*buf = urb->transfer_buffer;
354 	unsigned	len = urb->transfer_buffer_length;
355 	unsigned	maxpacket;
356 
357 	switch (pattern) {
358 	default:
359 		/* FALLTHROUGH */
360 	case 0:
361 		memset(buf, 0, len);
362 		break;
363 	case 1:			/* mod63 */
364 		maxpacket = get_maxpacket(urb->dev, urb->pipe);
365 		for (i = 0; i < len; i++)
366 			*buf++ = (u8) ((i % maxpacket) % 63);
367 		break;
368 	}
369 }
370 
371 static inline unsigned long buffer_offset(void *buf)
372 {
373 	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
374 }
375 
376 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
377 {
378 	u8 *buf = urb->transfer_buffer;
379 	u8 *guard = buf - buffer_offset(buf);
380 	unsigned i;
381 
382 	for (i = 0; guard < buf; i++, guard++) {
383 		if (*guard != GUARD_BYTE) {
384 			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
385 				i, *guard, GUARD_BYTE);
386 			return -EINVAL;
387 		}
388 	}
389 	return 0;
390 }
391 
392 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
393 {
394 	unsigned	i;
395 	u8		expected;
396 	u8		*buf = urb->transfer_buffer;
397 	unsigned	len = urb->actual_length;
398 	unsigned	maxpacket = get_maxpacket(urb->dev, urb->pipe);
399 
400 	int ret = check_guard_bytes(tdev, urb);
401 	if (ret)
402 		return ret;
403 
404 	for (i = 0; i < len; i++, buf++) {
405 		switch (pattern) {
406 		/* all-zeroes has no synchronization issues */
407 		case 0:
408 			expected = 0;
409 			break;
410 		/* mod63 stays in sync with short-terminated transfers,
411 		 * or otherwise when host and gadget agree on how large
412 		 * each usb transfer request should be.  resync is done
413 		 * with set_interface or set_config.
414 		 */
415 		case 1:			/* mod63 */
416 			expected = (i % maxpacket) % 63;
417 			break;
418 		/* always fail unsupported patterns */
419 		default:
420 			expected = !*buf;
421 			break;
422 		}
423 		if (*buf == expected)
424 			continue;
425 		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
426 		return -EINVAL;
427 	}
428 	return 0;
429 }
430 
431 static void simple_free_urb(struct urb *urb)
432 {
433 	unsigned long offset = buffer_offset(urb->transfer_buffer);
434 
435 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
436 		usb_free_coherent(
437 			urb->dev,
438 			urb->transfer_buffer_length + offset,
439 			urb->transfer_buffer - offset,
440 			urb->transfer_dma - offset);
441 	else
442 		kfree(urb->transfer_buffer - offset);
443 	usb_free_urb(urb);
444 }
445 
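/* Run one urb synchronously, 'iterations' times: OUT buffers are refilled
 * with the chosen pattern before each submit, IN buffers are verified after
 * completion, and 'vary' optionally grows the transfer length each round
 * (wrapping at the allocated size).  The result is logged as an error only
 * when it differs from 'expected'.
 */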
446 static int simple_io(
447 	struct usbtest_dev	*tdev,
448 	struct urb		*urb,
449 	int			iterations,
450 	int			vary,
451 	int			expected,
452 	const char		*label
453 )
454 {
455 	struct usb_device	*udev = urb->dev;
456 	int			max = urb->transfer_buffer_length;
457 	struct completion	completion;
458 	int			retval = 0;
459 	unsigned long		expire;
460 
461 	urb->context = &completion;
462 	while (retval == 0 && iterations-- > 0) {
463 		init_completion(&completion);
464 		if (usb_pipeout(urb->pipe)) {
465 			simple_fill_buf(urb);
466 			urb->transfer_flags |= URB_ZERO_PACKET;
467 		}
468 		retval = usb_submit_urb(urb, GFP_KERNEL);
469 		if (retval != 0)
470 			break;
471 
472 		expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
473 		if (!wait_for_completion_timeout(&completion, expire)) {
474 			usb_kill_urb(urb);
475 			retval = (urb->status == -ENOENT ?
476 				  -ETIMEDOUT : urb->status);
477 		} else {
478 			retval = urb->status;
479 		}
480 
481 		urb->dev = udev;
482 		if (retval == 0 && usb_pipein(urb->pipe))
483 			retval = simple_check_buf(tdev, urb);
484 
485 		if (vary) {
486 			int	len = urb->transfer_buffer_length;
487 
488 			len += vary;
489 			len %= max;
490 			if (len == 0)
491 				len = (vary < max) ? vary : max;
492 			urb->transfer_buffer_length = len;
493 		}
494 
495 		/* FIXME if endpoint halted, clear halt (and log) */
496 	}
497 	urb->transfer_buffer_length = max;
498 
499 	if (expected != retval)
500 		dev_err(&udev->dev,
501 			"%s failed, iterations left %d, status %d (not %d)\n",
502 				label, iterations, retval, expected);
503 	return retval;
504 }
505 
506 
507 /*-------------------------------------------------------------------------*/
508 
509 /* We use scatterlist primitives to test queued I/O.
510  * Yes, this also tests the scatterlist primitives.
511  */
512 
513 static void free_sglist(struct scatterlist *sg, int nents)
514 {
515 	unsigned		i;
516 
517 	if (!sg)
518 		return;
519 	for (i = 0; i < nents; i++) {
520 		if (!sg_page(&sg[i]))
521 			continue;
522 		kfree(sg_virt(&sg[i]));
523 	}
524 	kfree(sg);
525 }
526 
527 static struct scatterlist *
528 alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
529 {
530 	struct scatterlist	*sg;
531 	unsigned int		n_size = 0;
532 	unsigned		i;
533 	unsigned		size = max;
534 	unsigned		maxpacket =
535 		get_maxpacket(interface_to_usbdev(dev->intf), pipe);
536 
537 	if (max == 0)
538 		return NULL;
539 
540 	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
541 	if (!sg)
542 		return NULL;
543 	sg_init_table(sg, nents);
544 
545 	for (i = 0; i < nents; i++) {
546 		char		*buf;
547 		unsigned	j;
548 
549 		buf = kzalloc(size, GFP_KERNEL);
550 		if (!buf) {
551 			free_sglist(sg, i);
552 			return NULL;
553 		}
554 
555 		/* kmalloc pages are always physically contiguous! */
556 		sg_set_buf(&sg[i], buf, size);
557 
558 		switch (pattern) {
559 		case 0:
560 			/* already zeroed */
561 			break;
562 		case 1:
563 			for (j = 0; j < size; j++)
564 				*buf++ = (u8) (((j + n_size) % maxpacket) % 63);
565 			n_size += size;
566 			break;
567 		}
568 
569 		if (vary) {
570 			size += vary;
571 			size %= max;
572 			if (size == 0)
573 				size = (vary < max) ? vary : max;
574 		}
575 	}
576 
577 	return sg;
578 }
579 
580 struct sg_timeout {
581 	struct timer_list timer;
582 	struct usb_sg_request *req;
583 };
584 
585 static void sg_timeout(struct timer_list *t)
586 {
587 	struct sg_timeout *timeout = from_timer(timeout, t, timer);
588 
589 	usb_sg_cancel(timeout->req);
590 }
591 
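/* Run the scatterlist request 'iterations' times over the given pipe,
 * arming an on-stack timer before each pass so usb_sg_cancel() fires if
 * the request doesn't complete within SIMPLE_IO_TIMEOUT.
 */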
592 static int perform_sglist(
593 	struct usbtest_dev	*tdev,
594 	unsigned		iterations,
595 	int			pipe,
596 	struct usb_sg_request	*req,
597 	struct scatterlist	*sg,
598 	int			nents
599 )
600 {
601 	struct usb_device	*udev = testdev_to_usbdev(tdev);
602 	int			retval = 0;
603 	struct sg_timeout	timeout = {
604 		.req = req,
605 	};
606 
607 	timer_setup_on_stack(&timeout.timer, sg_timeout, 0);
608 
609 	while (retval == 0 && iterations-- > 0) {
610 		retval = usb_sg_init(req, udev, pipe,
611 				(udev->speed == USB_SPEED_HIGH)
612 					? (INTERRUPT_RATE << 3)
613 					: INTERRUPT_RATE,
614 				sg, nents, 0, GFP_KERNEL);
615 
616 		if (retval)
617 			break;
618 		mod_timer(&timeout.timer, jiffies +
619 				msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
620 		usb_sg_wait(req);
621 		if (!del_timer_sync(&timeout.timer))
622 			retval = -ETIMEDOUT;
623 		else
624 			retval = req->status;
625 		destroy_timer_on_stack(&timeout.timer);
626 
627 		/* FIXME check resulting data pattern */
628 
629 		/* FIXME if endpoint halted, clear halt (and log) */
630 	}
631 
632 	/* FIXME for unlink or fault handling tests, don't report
633 	 * failure if retval is as we expected ...
634 	 */
635 	if (retval)
636 		ERROR(tdev, "perform_sglist failed, "
637 				"iterations left %d, status %d\n",
638 				iterations, retval);
639 	return retval;
640 }
641 
642 
643 /*-------------------------------------------------------------------------*/
644 
645 /* unqueued control message testing
646  *
647  * there's a nice set of device functional requirements in chapter 9 of the
648  * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
649  * special test firmware.
650  *
651  * we know the device is configured (or suspended) by the time it's visible
652  * through usbfs.  we can't change that, so we won't test enumeration (which
653  * worked 'well enough' to get here, this time), power management (ditto),
654  * or remote wakeup (which needs human interaction).
655  */
656 
657 static unsigned realworld = 1;
658 module_param(realworld, uint, 0);
659 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
660 
661 static int get_altsetting(struct usbtest_dev *dev)
662 {
663 	struct usb_interface	*iface = dev->intf;
664 	struct usb_device	*udev = interface_to_usbdev(iface);
665 	int			retval;
666 
667 	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
668 			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
669 			0, iface->altsetting[0].desc.bInterfaceNumber,
670 			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
671 	switch (retval) {
672 	case 1:
673 		return dev->buf[0];
674 	case 0:
675 		retval = -ERANGE;
676 		/* FALLTHROUGH */
677 	default:
678 		return retval;
679 	}
680 }
681 
682 static int set_altsetting(struct usbtest_dev *dev, int alternate)
683 {
684 	struct usb_interface		*iface = dev->intf;
685 	struct usb_device		*udev;
686 
687 	if (alternate < 0 || alternate >= 256)
688 		return -EINVAL;
689 
690 	udev = interface_to_usbdev(iface);
691 	return usb_set_interface(udev,
692 			iface->altsetting[0].desc.bInterfaceNumber,
693 			alternate);
694 }
695 
696 static int is_good_config(struct usbtest_dev *tdev, int len)
697 {
698 	struct usb_config_descriptor	*config;
699 
700 	if (len < sizeof(*config))
701 		return 0;
702 	config = (struct usb_config_descriptor *) tdev->buf;
703 
704 	switch (config->bDescriptorType) {
705 	case USB_DT_CONFIG:
706 	case USB_DT_OTHER_SPEED_CONFIG:
707 		if (config->bLength != 9) {
708 			ERROR(tdev, "bogus config descriptor length\n");
709 			return 0;
710 		}
711 		/* this bit 'must be 1' but often isn't */
712 		if (!realworld && !(config->bmAttributes & 0x80)) {
713 			ERROR(tdev, "high bit of config attributes not set\n");
714 			return 0;
715 		}
716 		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
717 			ERROR(tdev, "reserved config bits set\n");
718 			return 0;
719 		}
720 		break;
721 	default:
722 		return 0;
723 	}
724 
725 	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
726 		return 1;
727 	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
728 		return 1;
729 	ERROR(tdev, "bogus config descriptor read size\n");
730 	return 0;
731 }
732 
733 static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
734 {
735 	struct usb_ext_cap_descriptor *ext;
736 	u32 attr;
737 
738 	ext = (struct usb_ext_cap_descriptor *) buf;
739 
740 	if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
741 		ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
742 		return 0;
743 	}
744 
745 	attr = le32_to_cpu(ext->bmAttributes);
746 	/* bits[1:15] are used and the others are reserved */
747 	if (attr & ~0xfffe) {	/* reserved == 0 */
748 		ERROR(tdev, "reserved bits set\n");
749 		return 0;
750 	}
751 
752 	return 1;
753 }
754 
755 static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
756 {
757 	struct usb_ss_cap_descriptor *ss;
758 
759 	ss = (struct usb_ss_cap_descriptor *) buf;
760 
761 	if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
762 		ERROR(tdev, "bogus superspeed device capability descriptor length\n");
763 		return 0;
764 	}
765 
766 	/*
767 	 * only bit[1] of bmAttributes is used for LTM and others are
768 	 * reserved
769 	 */
770 	if (ss->bmAttributes & ~0x02) {	/* reserved == 0 */
771 		ERROR(tdev, "reserved bits set in bmAttributes\n");
772 		return 0;
773 	}
774 
775 	/* bits[0:3] of wSpeedSupported are used and the others are reserved */
776 	if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) {	/* reserved == 0 */
777 		ERROR(tdev, "reserved bits set in wSpeedSupported\n");
778 		return 0;
779 	}
780 
781 	return 1;
782 }
783 
784 static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
785 {
786 	struct usb_ss_container_id_descriptor *con_id;
787 
788 	con_id = (struct usb_ss_container_id_descriptor *) buf;
789 
790 	if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
791 		ERROR(tdev, "bogus container id descriptor length\n");
792 		return 0;
793 	}
794 
795 	if (con_id->bReserved) {	/* reserved == 0 */
796 		ERROR(tdev, "reserved bits set\n");
797 		return 0;
798 	}
799 
800 	return 1;
801 }
802 
803 /* sanity test for standard requests working with usb_control_msg() and some
804  * of the utility functions which use it.
805  *
806  * this doesn't test how endpoint halts behave or data toggles get set, since
807  * we won't do I/O to bulk/interrupt endpoints here (which is how to change
808  * halt or toggle).  toggle testing is impractical without support from hcds.
809  *
810  * this avoids failing devices linux would normally work with, by not testing
811  * config/altsetting operations for devices that only support their defaults.
812  * such devices rarely support those needless operations.
813  *
814  * NOTE that since this is a sanity test, it's not examining boundary cases
815  * to see if usbcore, hcd, and device all behave right.  such testing would
816  * involve varied read sizes and other operation sequences.
817  */
818 static int ch9_postconfig(struct usbtest_dev *dev)
819 {
820 	struct usb_interface	*iface = dev->intf;
821 	struct usb_device	*udev = interface_to_usbdev(iface);
822 	int			i, alt, retval;
823 
824 	/* [9.2.3] if there's more than one altsetting, we need to be able to
825 	 * set and get each one.  mostly trusts the descriptors from usbcore.
826 	 */
827 	for (i = 0; i < iface->num_altsetting; i++) {
828 
829 		/* 9.2.3 constrains the range here */
830 		alt = iface->altsetting[i].desc.bAlternateSetting;
831 		if (alt < 0 || alt >= iface->num_altsetting) {
832 			dev_err(&iface->dev,
833 					"invalid alt [%d].bAltSetting = %d\n",
834 					i, alt);
835 		}
836 
837 		/* [real world] get/set unimplemented if there's only one */
838 		if (realworld && iface->num_altsetting == 1)
839 			continue;
840 
841 		/* [9.4.10] set_interface */
842 		retval = set_altsetting(dev, alt);
843 		if (retval) {
844 			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
845 					alt, retval);
846 			return retval;
847 		}
848 
849 		/* [9.4.4] get_interface always works */
850 		retval = get_altsetting(dev);
851 		if (retval != alt) {
852 			dev_err(&iface->dev, "get alt should be %d, was %d\n",
853 					alt, retval);
854 			return (retval < 0) ? retval : -EDOM;
855 		}
856 
857 	}
858 
859 	/* [real world] get_config unimplemented if there's only one */
860 	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
861 		int	expected = udev->actconfig->desc.bConfigurationValue;
862 
863 		/* [9.4.2] get_configuration always works
864 		 * ... although some cheap devices (like one TI Hub I've got)
865 		 * won't return config descriptors except before set_config.
866 		 */
867 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
868 				USB_REQ_GET_CONFIGURATION,
869 				USB_DIR_IN | USB_RECIP_DEVICE,
870 				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
871 		if (retval != 1 || dev->buf[0] != expected) {
872 			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
873 				retval, dev->buf[0], expected);
874 			return (retval < 0) ? retval : -EDOM;
875 		}
876 	}
877 
878 	/* there's always [9.4.3] a device descriptor [9.6.1] */
879 	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
880 			dev->buf, sizeof(udev->descriptor));
881 	if (retval != sizeof(udev->descriptor)) {
882 		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
883 		return (retval < 0) ? retval : -EDOM;
884 	}
885 
886 	/*
887 	 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
888 	 * 3.0 spec
889 	 */
890 	if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
891 		struct usb_bos_descriptor *bos = NULL;
892 		struct usb_dev_cap_header *header = NULL;
893 		unsigned total, num, length;
894 		u8 *buf;
895 
896 		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
897 				sizeof(*udev->bos->desc));
898 		if (retval != sizeof(*udev->bos->desc)) {
899 			dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
900 			return (retval < 0) ? retval : -EDOM;
901 		}
902 
903 		bos = (struct usb_bos_descriptor *)dev->buf;
904 		total = le16_to_cpu(bos->wTotalLength);
905 		num = bos->bNumDeviceCaps;
906 
907 		if (total > TBUF_SIZE)
908 			total = TBUF_SIZE;
909 
910 		/*
911 		 * get generic device-level capability descriptors [9.6.2]
912 		 * in USB 3.0 spec
913 		 */
914 		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
915 				total);
916 		if (retval != total) {
917 			dev_err(&iface->dev, "bos descriptor set --> %d\n",
918 					retval);
919 			return (retval < 0) ? retval : -EDOM;
920 		}
921 
922 		length = sizeof(*udev->bos->desc);
923 		buf = dev->buf;
924 		for (i = 0; i < num; i++) {
925 			buf += length;
926 			if (buf + sizeof(struct usb_dev_cap_header) >
927 					dev->buf + total)
928 				break;
929 
930 			header = (struct usb_dev_cap_header *)buf;
931 			length = header->bLength;
932 
933 			if (header->bDescriptorType !=
934 					USB_DT_DEVICE_CAPABILITY) {
935 				dev_warn(&udev->dev, "not device capability descriptor, skip\n");
936 				continue;
937 			}
938 
939 			switch (header->bDevCapabilityType) {
940 			case USB_CAP_TYPE_EXT:
941 				if (buf + USB_DT_USB_EXT_CAP_SIZE >
942 						dev->buf + total ||
943 						!is_good_ext(dev, buf)) {
944 					dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
945 					return -EDOM;
946 				}
947 				break;
948 			case USB_SS_CAP_TYPE:
949 				if (buf + USB_DT_USB_SS_CAP_SIZE >
950 						dev->buf + total ||
951 						!is_good_ss_cap(dev, buf)) {
952 					dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
953 					return -EDOM;
954 				}
955 				break;
956 			case CONTAINER_ID_TYPE:
957 				if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
958 						dev->buf + total ||
959 						!is_good_con_id(dev, buf)) {
960 					dev_err(&iface->dev, "bogus container id descriptor\n");
961 					return -EDOM;
962 				}
963 				break;
964 			default:
965 				break;
966 			}
967 		}
968 	}
969 
970 	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
971 	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
972 		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
973 				dev->buf, TBUF_SIZE);
974 		if (!is_good_config(dev, retval)) {
975 			dev_err(&iface->dev,
976 					"config [%d] descriptor --> %d\n",
977 					i, retval);
978 			return (retval < 0) ? retval : -EDOM;
979 		}
980 
981 		/* FIXME cross-checking udev->config[i] to make sure usbcore
982 		 * parsed it right (etc) would be good testing paranoia
983 		 */
984 	}
985 
986 	/* and sometimes [9.2.6.6] speed dependent descriptors */
987 	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
988 		struct usb_qualifier_descriptor *d = NULL;
989 
990 		/* device qualifier [9.6.2] */
991 		retval = usb_get_descriptor(udev,
992 				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
993 				sizeof(struct usb_qualifier_descriptor));
994 		if (retval == -EPIPE) {
995 			if (udev->speed == USB_SPEED_HIGH) {
996 				dev_err(&iface->dev,
997 						"hs dev qualifier --> %d\n",
998 						retval);
999 				return retval;
1000 			}
1001 			/* usb2.0 but not high-speed capable; fine */
1002 		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
1003 			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
1004 			return (retval < 0) ? retval : -EDOM;
1005 		} else
1006 			d = (struct usb_qualifier_descriptor *) dev->buf;
1007 
1008 		/* might not have [9.6.2] any other-speed configs [9.6.4] */
1009 		if (d) {
1010 			unsigned max = d->bNumConfigurations;
1011 			for (i = 0; i < max; i++) {
1012 				retval = usb_get_descriptor(udev,
1013 					USB_DT_OTHER_SPEED_CONFIG, i,
1014 					dev->buf, TBUF_SIZE);
1015 				if (!is_good_config(dev, retval)) {
1016 					dev_err(&iface->dev,
1017 						"other speed config --> %d\n",
1018 						retval);
1019 					return (retval < 0) ? retval : -EDOM;
1020 				}
1021 			}
1022 		}
1023 	}
1024 	/* FIXME fetch strings from at least the device descriptor */
1025 
1026 	/* [9.4.5] get_status always works */
1027 	retval = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
1028 	if (retval) {
1029 		dev_err(&iface->dev, "get dev status --> %d\n", retval);
1030 		return retval;
1031 	}
1032 
1033 	/* FIXME configuration.bmAttributes says if we could try to set/clear
1034 	 * the device's remote wakeup feature ... if we can, test that here
1035 	 */
1036 
1037 	retval = usb_get_std_status(udev, USB_RECIP_INTERFACE,
1038 			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
1039 	if (retval) {
1040 		dev_err(&iface->dev, "get interface status --> %d\n", retval);
1041 		return retval;
1042 	}
1043 	/* FIXME get status for each endpoint in the interface */
1044 
1045 	return 0;
1046 }
1047 
1048 /*-------------------------------------------------------------------------*/
1049 
1050 /* use ch9 requests to test whether:
1051  *   (a) queues work for control, keeping N subtests queued and
1052  *       active (auto-resubmit) for M loops through the queue.
1053  *   (b) protocol stalls (control-only) will autorecover.
1054  *       it's not like bulk/intr; no halt clearing.
1055  *   (c) short control reads are reported and handled.
1056  *   (d) queues are always processed in-order
1057  */
1058 
1059 struct ctrl_ctx {
1060 	spinlock_t		lock;
1061 	struct usbtest_dev	*dev;
1062 	struct completion	complete;
1063 	unsigned		count;
1064 	unsigned		pending;
1065 	int			status;
1066 	struct urb		**urb;
1067 	struct usbtest_param_32	*param;
1068 	int			last;
1069 };
1070 
1071 #define NUM_SUBCASES	16		/* how many test subcases here? */
1072 
1073 struct subcase {
1074 	struct usb_ctrlrequest	setup;
1075 	int			number;
1076 	int			expected;
1077 };
1078 
1079 static void ctrl_complete(struct urb *urb)
1080 {
1081 	struct ctrl_ctx		*ctx = urb->context;
1082 	struct usb_ctrlrequest	*reqp;
1083 	struct subcase		*subcase;
1084 	int			status = urb->status;
1085 	unsigned long		flags;
1086 
1087 	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
1088 	subcase = container_of(reqp, struct subcase, setup);
1089 
1090 	spin_lock_irqsave(&ctx->lock, flags);
1091 	ctx->count--;
1092 	ctx->pending--;
1093 
1094 	/* queue must transfer and complete in fifo order, unless
1095 	 * usb_unlink_urb() is used to unlink something not at the
1096 	 * physical queue head (not tested).
1097 	 */
1098 	if (subcase->number > 0) {
1099 		if ((subcase->number - ctx->last) != 1) {
1100 			ERROR(ctx->dev,
1101 				"subcase %d completed out of order, last %d\n",
1102 				subcase->number, ctx->last);
1103 			status = -EDOM;
1104 			ctx->last = subcase->number;
1105 			goto error;
1106 		}
1107 	}
1108 	ctx->last = subcase->number;
1109 
1110 	/* succeed or fault in only one way? */
1111 	if (status == subcase->expected)
1112 		status = 0;
1113 
1114 	/* async unlink for cleanup? */
1115 	else if (status != -ECONNRESET) {
1116 
1117 		/* some faults are allowed, not required */
1118 		if (subcase->expected > 0 && (
1119 			  ((status == -subcase->expected	/* happened */
1120 			   || status == 0))))			/* didn't */
1121 			status = 0;
1122 		/* sometimes more than one fault is allowed */
1123 		else if (subcase->number == 12 && status == -EPIPE)
1124 			status = 0;
1125 		else
1126 			ERROR(ctx->dev, "subtest %d error, status %d\n",
1127 					subcase->number, status);
1128 	}
1129 
1130 	/* unexpected status codes mean errors; ideally, in hardware */
1131 	if (status) {
1132 error:
1133 		if (ctx->status == 0) {
1134 			int		i;
1135 
1136 			ctx->status = status;
1137 			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
1138 					"%d left, subcase %d, len %d/%d\n",
1139 					reqp->bRequestType, reqp->bRequest,
1140 					status, ctx->count, subcase->number,
1141 					urb->actual_length,
1142 					urb->transfer_buffer_length);
1143 
1144 			/* FIXME this "unlink everything" exit route should
1145 			 * be a separate test case.
1146 			 */
1147 
1148 			/* unlink whatever's still pending */
1149 			for (i = 1; i < ctx->param->sglen; i++) {
1150 				struct urb *u = ctx->urb[
1151 							(i + subcase->number)
1152 							% ctx->param->sglen];
1153 
1154 				if (u == urb || !u->dev)
1155 					continue;
1156 				spin_unlock(&ctx->lock);
1157 				status = usb_unlink_urb(u);
1158 				spin_lock(&ctx->lock);
1159 				switch (status) {
1160 				case -EINPROGRESS:
1161 				case -EBUSY:
1162 				case -EIDRM:
1163 					continue;
1164 				default:
1165 					ERROR(ctx->dev, "urb unlink --> %d\n",
1166 							status);
1167 				}
1168 			}
1169 			status = ctx->status;
1170 		}
1171 	}
1172 
1173 	/* resubmit if we need to, else mark this as done */
1174 	if ((status == 0) && (ctx->pending < ctx->count)) {
1175 		status = usb_submit_urb(urb, GFP_ATOMIC);
1176 		if (status != 0) {
1177 			ERROR(ctx->dev,
1178 				"can't resubmit ctrl %02x.%02x, err %d\n",
1179 				reqp->bRequestType, reqp->bRequest, status);
1180 			urb->dev = NULL;
1181 		} else
1182 			ctx->pending++;
1183 	} else
1184 		urb->dev = NULL;
1185 
1186 	/* signal completion when nothing's queued */
1187 	if (ctx->pending == 0)
1188 		complete(&ctx->complete);
1189 	spin_unlock_irqrestore(&ctx->lock, flags);
1190 }
1191 
1192 static int
1193 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param)
1194 {
1195 	struct usb_device	*udev = testdev_to_usbdev(dev);
1196 	struct urb		**urb;
1197 	struct ctrl_ctx		context;
1198 	int			i;
1199 
1200 	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
1201 		return -EOPNOTSUPP;
1202 
1203 	spin_lock_init(&context.lock);
1204 	context.dev = dev;
1205 	init_completion(&context.complete);
1206 	context.count = param->sglen * param->iterations;
1207 	context.pending = 0;
1208 	context.status = -ENOMEM;
1209 	context.param = param;
1210 	context.last = -1;
1211 
1212 	/* allocate and init the urbs we'll queue.
1213 	 * as with bulk/intr sglists, sglen is the queue depth; it also
1214 	 * controls which subtests run (more tests than sglen) or rerun.
1215 	 */
1216 	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
1217 	if (!urb)
1218 		return -ENOMEM;
1219 	for (i = 0; i < param->sglen; i++) {
1220 		int			pipe = usb_rcvctrlpipe(udev, 0);
1221 		unsigned		len;
1222 		struct urb		*u;
1223 		struct usb_ctrlrequest	req;
1224 		struct subcase		*reqp;
1225 
1226 		/* sign of this variable means:
1227 		 *  -: tested code must return this (negative) error code
1228 		 *  +: tested code may return this (negative too) error code
1229 		 */
1230 		int			expected = 0;
1231 
1232 		/* requests here are mostly expected to succeed on any
1233 		 * device, but some are chosen to trigger protocol stalls
1234 		 * or short reads.
1235 		 */
1236 		memset(&req, 0, sizeof(req));
1237 		req.bRequest = USB_REQ_GET_DESCRIPTOR;
1238 		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1239 
1240 		switch (i % NUM_SUBCASES) {
1241 		case 0:		/* get device descriptor */
1242 			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
1243 			len = sizeof(struct usb_device_descriptor);
1244 			break;
1245 		case 1:		/* get first config descriptor (only) */
1246 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1247 			len = sizeof(struct usb_config_descriptor);
1248 			break;
1249 		case 2:		/* get altsetting (OFTEN STALLS) */
1250 			req.bRequest = USB_REQ_GET_INTERFACE;
1251 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1252 			/* index = 0 means first interface */
1253 			len = 1;
1254 			expected = EPIPE;
1255 			break;
1256 		case 3:		/* get interface status */
1257 			req.bRequest = USB_REQ_GET_STATUS;
1258 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1259 			/* interface 0 */
1260 			len = 2;
1261 			break;
1262 		case 4:		/* get device status */
1263 			req.bRequest = USB_REQ_GET_STATUS;
1264 			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1265 			len = 2;
1266 			break;
1267 		case 5:		/* get device qualifier (MAY STALL) */
1268 			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
1269 			len = sizeof(struct usb_qualifier_descriptor);
1270 			if (udev->speed != USB_SPEED_HIGH)
1271 				expected = EPIPE;
1272 			break;
1273 		case 6:		/* get first config descriptor, plus interface */
1274 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1275 			len = sizeof(struct usb_config_descriptor);
1276 			len += sizeof(struct usb_interface_descriptor);
1277 			break;
1278 		case 7:		/* get interface descriptor (ALWAYS STALLS) */
1279 			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
1280 			/* interface == 0 */
1281 			len = sizeof(struct usb_interface_descriptor);
1282 			expected = -EPIPE;
1283 			break;
1284 		/* NOTE: two consecutive stalls in the queue here.
1285 		 *  that tests fault recovery a bit more aggressively. */
1286 		case 8:		/* clear endpoint halt (MAY STALL) */
1287 			req.bRequest = USB_REQ_CLEAR_FEATURE;
1288 			req.bRequestType = USB_RECIP_ENDPOINT;
1289 			/* wValue 0 == ep halt */
1290 			/* wIndex 0 == ep0 (shouldn't halt!) */
1291 			len = 0;
1292 			pipe = usb_sndctrlpipe(udev, 0);
1293 			expected = EPIPE;
1294 			break;
1295 		case 9:		/* get endpoint status */
1296 			req.bRequest = USB_REQ_GET_STATUS;
1297 			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1298 			/* endpoint 0 */
1299 			len = 2;
1300 			break;
1301 		case 10:	/* trigger short read (EREMOTEIO) */
1302 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1303 			len = 1024;
1304 			expected = -EREMOTEIO;
1305 			break;
1306 		/* NOTE: two consecutive _different_ faults in the queue. */
1307 		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1308 			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1309 			/* endpoint == 0 */
1310 			len = sizeof(struct usb_interface_descriptor);
1311 			expected = EPIPE;
1312 			break;
1313 		/* NOTE: sometimes even a third fault in the queue! */
1314 		case 12:	/* get string 0 descriptor (MAY STALL) */
1315 			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1316 			/* string == 0, for language IDs */
1317 			len = sizeof(struct usb_interface_descriptor);
1318 			/* may succeed when > 4 languages */
1319 			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1320 			break;
1321 		case 13:	/* short read, resembling case 10 */
1322 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1323 			/* last data packet "should" be DATA1, not DATA0 */
1324 			if (udev->speed == USB_SPEED_SUPER)
1325 				len = 1024 - 512;
1326 			else
1327 				len = 1024 - udev->descriptor.bMaxPacketSize0;
1328 			expected = -EREMOTEIO;
1329 			break;
1330 		case 14:	/* short read; try to fill the last packet */
1331 			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1332 			/* device descriptor size == 18 bytes */
1333 			len = udev->descriptor.bMaxPacketSize0;
1334 			if (udev->speed == USB_SPEED_SUPER)
1335 				len = 512;
1336 			switch (len) {
1337 			case 8:
1338 				len = 24;
1339 				break;
1340 			case 16:
1341 				len = 32;
1342 				break;
1343 			}
1344 			expected = -EREMOTEIO;
1345 			break;
1346 		case 15:
1347 			req.wValue = cpu_to_le16(USB_DT_BOS << 8);
1348 			if (udev->bos)
1349 				len = le16_to_cpu(udev->bos->desc->wTotalLength);
1350 			else
1351 				len = sizeof(struct usb_bos_descriptor);
1352 			if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
1353 				expected = -EPIPE;
1354 			break;
1355 		default:
1356 			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1357 			context.status = -EINVAL;
1358 			goto cleanup;
1359 		}
1360 		req.wLength = cpu_to_le16(len);
1361 		urb[i] = u = simple_alloc_urb(udev, pipe, len, 0);
1362 		if (!u)
1363 			goto cleanup;
1364 
1365 		reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
1366 		if (!reqp)
1367 			goto cleanup;
1368 		reqp->setup = req;
1369 		reqp->number = i % NUM_SUBCASES;
1370 		reqp->expected = expected;
1371 		u->setup_packet = (char *) &reqp->setup;
1372 
1373 		u->context = &context;
1374 		u->complete = ctrl_complete;
1375 	}
1376 
1377 	/* queue the urbs */
1378 	context.urb = urb;
1379 	spin_lock_irq(&context.lock);
1380 	for (i = 0; i < param->sglen; i++) {
1381 		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1382 		if (context.status != 0) {
1383 			ERROR(dev, "can't submit urb[%d], status %d\n",
1384 					i, context.status);
1385 			context.count = context.pending;
1386 			break;
1387 		}
1388 		context.pending++;
1389 	}
1390 	spin_unlock_irq(&context.lock);
1391 
1392 	/* FIXME  set timer and time out; provide a disconnect hook */
1393 
1394 	/* wait for the last one to complete */
1395 	if (context.pending > 0)
1396 		wait_for_completion(&context.complete);
1397 
1398 cleanup:
1399 	for (i = 0; i < param->sglen; i++) {
1400 		if (!urb[i])
1401 			continue;
1402 		urb[i]->dev = udev;
1403 		kfree(urb[i]->setup_packet);
1404 		simple_free_urb(urb[i]);
1405 	}
1406 	kfree(urb);
1407 	return context.status;
1408 }
1409 #undef NUM_SUBCASES
1410 
1411 
1412 /*-------------------------------------------------------------------------*/
1413 
1414 static void unlink1_callback(struct urb *urb)
1415 {
1416 	int	status = urb->status;
1417 
1418 	/* we "know" -EPIPE (stall) never happens */
1419 	if (!status)
1420 		status = usb_submit_urb(urb, GFP_ATOMIC);
1421 	if (status) {
1422 		urb->status = status;
1423 		complete(urb->context);
1424 	}
1425 }
1426 
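/* Submit one urb that keeps resubmitting itself from its completion
 * handler, wait a pseudo-random amount of time, then unlink it either
 * asynchronously (usb_unlink_urb, expecting -ECONNRESET) or synchronously
 * (usb_kill_urb, expecting -ENOENT or -EPERM).
 */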
1427 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1428 {
1429 	struct urb		*urb;
1430 	struct completion	completion;
1431 	int			retval = 0;
1432 
1433 	init_completion(&completion);
1434 	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0);
1435 	if (!urb)
1436 		return -ENOMEM;
1437 	urb->context = &completion;
1438 	urb->complete = unlink1_callback;
1439 
1440 	if (usb_pipeout(urb->pipe)) {
1441 		simple_fill_buf(urb);
1442 		urb->transfer_flags |= URB_ZERO_PACKET;
1443 	}
1444 
1445 	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1446 	 * states, and testing should get to all of them over time.
1447 	 *
1448 	 * FIXME want additional tests for when endpoint is STALLing
1449 	 * due to errors, or is just NAKing requests.
1450 	 */
1451 	retval = usb_submit_urb(urb, GFP_KERNEL);
1452 	if (retval != 0) {
1453 		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1454 		return retval;
1455 	}
1456 
1457 	/* unlinking that should always work.  variable delay tests more
1458 	 * hcd states and code paths, even with little other system load.
1459 	 */
1460 	msleep(jiffies % (2 * INTERRUPT_RATE));
1461 	if (async) {
1462 		while (!completion_done(&completion)) {
1463 			retval = usb_unlink_urb(urb);
1464 
1465 			if (retval == 0 && usb_pipein(urb->pipe))
1466 				retval = simple_check_buf(dev, urb);
1467 
1468 			switch (retval) {
1469 			case -EBUSY:
1470 			case -EIDRM:
1471 				/* we can't unlink urbs while they're completing
1472 				 * or if they've completed, and we haven't
1473 				 * resubmitted. "normal" drivers would prevent
1474 				 * resubmission, but since we're testing unlink
1475 				 * paths, we can't.
1476 				 */
1477 				ERROR(dev, "unlink retry\n");
1478 				continue;
1479 			case 0:
1480 			case -EINPROGRESS:
1481 				break;
1482 
1483 			default:
1484 				dev_err(&dev->intf->dev,
1485 					"unlink fail %d\n", retval);
1486 				return retval;
1487 			}
1488 
1489 			break;
1490 		}
1491 	} else
1492 		usb_kill_urb(urb);
1493 
1494 	wait_for_completion(&completion);
1495 	retval = urb->status;
1496 	simple_free_urb(urb);
1497 
1498 	if (async)
1499 		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1500 	else
1501 		return (retval == -ENOENT || retval == -EPERM) ?
1502 				0 : retval - 2000;
1503 }
1504 
1505 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1506 {
1507 	int			retval = 0;
1508 
1509 	/* test sync and async paths */
1510 	retval = unlink1(dev, pipe, len, 1);
1511 	if (!retval)
1512 		retval = unlink1(dev, pipe, len, 0);
1513 	return retval;
1514 }
1515 
1516 /*-------------------------------------------------------------------------*/
1517 
1518 struct queued_ctx {
1519 	struct completion	complete;
1520 	atomic_t		pending;
1521 	unsigned		num;
1522 	int			status;
1523 	struct urb		**urbs;
1524 };
1525 
1526 static void unlink_queued_callback(struct urb *urb)
1527 {
1528 	int			status = urb->status;
1529 	struct queued_ctx	*ctx = urb->context;
1530 
1531 	if (ctx->status)
1532 		goto done;
1533 	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1534 		if (status == -ECONNRESET)
1535 			goto done;
1536 		/* What error should we report if the URB completed normally? */
1537 	}
1538 	if (status != 0)
1539 		ctx->status = status;
1540 
1541  done:
1542 	if (atomic_dec_and_test(&ctx->pending))
1543 		complete(&ctx->complete);
1544 }
1545 
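/* Queue 'num' bulk urbs sharing one coherent buffer, then unlink urbs
 * num - 4 and num - 2 while the queue is still active; the unlinked urbs
 * should finish with -ECONNRESET and the rest must complete without error.
 */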
1546 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1547 		unsigned size)
1548 {
1549 	struct queued_ctx	ctx;
1550 	struct usb_device	*udev = testdev_to_usbdev(dev);
1551 	void			*buf;
1552 	dma_addr_t		buf_dma;
1553 	int			i;
1554 	int			retval = -ENOMEM;
1555 
1556 	init_completion(&ctx.complete);
1557 	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1558 	ctx.num = num;
1559 	ctx.status = 0;
1560 
1561 	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1562 	if (!buf)
1563 		return retval;
1564 	memset(buf, 0, size);
1565 
1566 	/* Allocate and init the urbs we'll queue */
1567 	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1568 	if (!ctx.urbs)
1569 		goto free_buf;
1570 	for (i = 0; i < num; i++) {
1571 		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1572 		if (!ctx.urbs[i])
1573 			goto free_urbs;
1574 		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1575 				unlink_queued_callback, &ctx);
1576 		ctx.urbs[i]->transfer_dma = buf_dma;
1577 		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1578 
1579 		if (usb_pipeout(ctx.urbs[i]->pipe)) {
1580 			simple_fill_buf(ctx.urbs[i]);
1581 			ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
1582 		}
1583 	}
1584 
1585 	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1586 	for (i = 0; i < num; i++) {
1587 		atomic_inc(&ctx.pending);
1588 		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1589 		if (retval != 0) {
1590 			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1591 					i, retval);
1592 			atomic_dec(&ctx.pending);
1593 			ctx.status = retval;
1594 			break;
1595 		}
1596 	}
1597 	if (i == num) {
1598 		usb_unlink_urb(ctx.urbs[num - 4]);
1599 		usb_unlink_urb(ctx.urbs[num - 2]);
1600 	} else {
1601 		while (--i >= 0)
1602 			usb_unlink_urb(ctx.urbs[i]);
1603 	}
1604 
1605 	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1606 		complete(&ctx.complete);
1607 	wait_for_completion(&ctx.complete);
1608 	retval = ctx.status;
1609 
1610  free_urbs:
1611 	for (i = 0; i < num; i++)
1612 		usb_free_urb(ctx.urbs[i]);
1613 	kfree(ctx.urbs);
1614  free_buf:
1615 	usb_free_coherent(udev, size, buf, buf_dma);
1616 	return retval;
1617 }
1618 
1619 /*-------------------------------------------------------------------------*/
1620 
1621 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1622 {
1623 	int	retval;
1624 	u16	status;
1625 
1626 	/* shouldn't look or act halted */
1627 	retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1628 	if (retval < 0) {
1629 		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1630 				ep, retval);
1631 		return retval;
1632 	}
1633 	if (status != 0) {
1634 		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1635 		return -EINVAL;
1636 	}
1637 	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1638 	if (retval != 0)
1639 		return -EINVAL;
1640 	return 0;
1641 }
1642 
1643 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1644 {
1645 	int	retval;
1646 	u16	status;
1647 
1648 	/* should look and act halted */
1649 	retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1650 	if (retval < 0) {
1651 		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1652 				ep, retval);
1653 		return retval;
1654 	}
1655 	if (status != 1) {
1656 		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1657 		return -EINVAL;
1658 	}
1659 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1660 	if (retval != -EPIPE)
1661 		return -EINVAL;
1662 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1663 	if (retval != -EPIPE)
1664 		return -EINVAL;
1665 	return 0;
1666 }
1667 
1668 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1669 {
1670 	int	retval;
1671 
1672 	/* shouldn't look or act halted now */
1673 	retval = verify_not_halted(tdev, ep, urb);
1674 	if (retval < 0)
1675 		return retval;
1676 
1677 	/* set halt (protocol test only), verify it worked */
1678 	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1679 			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1680 			USB_ENDPOINT_HALT, ep,
1681 			NULL, 0, USB_CTRL_SET_TIMEOUT);
1682 	if (retval < 0) {
1683 		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1684 		return retval;
1685 	}
1686 	retval = verify_halted(tdev, ep, urb);
1687 	if (retval < 0) {
1688 		int ret;
1689 
1690 		/* clear halt anyways, else further tests will fail */
1691 		ret = usb_clear_halt(urb->dev, urb->pipe);
1692 		if (ret)
1693 			ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
1694 			      ep, ret);
1695 
1696 		return retval;
1697 	}
1698 
1699 	/* clear halt (tests API + protocol), verify it worked */
1700 	retval = usb_clear_halt(urb->dev, urb->pipe);
1701 	if (retval < 0) {
1702 		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1703 		return retval;
1704 	}
1705 	retval = verify_not_halted(tdev, ep, urb);
1706 	if (retval < 0)
1707 		return retval;
1708 
1709 	/* NOTE:  could also verify SET_INTERFACE clear halts ... */
1710 
1711 	return 0;
1712 }
1713 
1714 static int test_toggle_sync(struct usbtest_dev *tdev, int ep, struct urb *urb)
1715 {
1716 	int	retval;
1717 
1718 	/* clear initial data toggle to DATA0 */
1719 	retval = usb_clear_halt(urb->dev, urb->pipe);
1720 	if (retval < 0) {
1721 		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1722 		return retval;
1723 	}
1724 
1725 	/* transfer 3 data packets, should be DATA0, DATA1, DATA0 */
1726 	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1727 	if (retval != 0)
1728 		return -EINVAL;
1729 
1730 	/* clear halt resets device side data toggle, host should react to it */
1731 	retval = usb_clear_halt(urb->dev, urb->pipe);
1732 	if (retval < 0) {
1733 		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1734 		return retval;
1735 	}
1736 
1737 	/* host should use DATA0 again after clear halt */
1738 	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1739 
1740 	return retval;
1741 }
1742 
1743 static int halt_simple(struct usbtest_dev *dev)
1744 {
1745 	int			ep;
1746 	int			retval = 0;
1747 	struct urb		*urb;
1748 	struct usb_device	*udev = testdev_to_usbdev(dev);
1749 
1750 	if (udev->speed == USB_SPEED_SUPER)
1751 		urb = simple_alloc_urb(udev, 0, 1024, 0);
1752 	else
1753 		urb = simple_alloc_urb(udev, 0, 512, 0);
1754 	if (urb == NULL)
1755 		return -ENOMEM;
1756 
1757 	if (dev->in_pipe) {
1758 		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1759 		urb->pipe = dev->in_pipe;
1760 		retval = test_halt(dev, ep, urb);
1761 		if (retval < 0)
1762 			goto done;
1763 	}
1764 
1765 	if (dev->out_pipe) {
1766 		ep = usb_pipeendpoint(dev->out_pipe);
1767 		urb->pipe = dev->out_pipe;
1768 		retval = test_halt(dev, ep, urb);
1769 	}
1770 done:
1771 	simple_free_urb(urb);
1772 	return retval;
1773 }
1774 
1775 static int toggle_sync_simple(struct usbtest_dev *dev)
1776 {
1777 	int			ep;
1778 	int			retval = 0;
1779 	struct urb		*urb;
1780 	struct usb_device	*udev = testdev_to_usbdev(dev);
1781 	unsigned		maxp = get_maxpacket(udev, dev->out_pipe);
1782 
1783 	/*
1784 	 * Create a URB that causes a transfer of an uneven number of data packets
1785 	 * This way the clear toggle has an impact on the data toggle sequence.
1786 	 * Use 2 maxpacket length packets and one zero packet.
1787 	 */
1788 	urb = simple_alloc_urb(udev, 0,  2 * maxp, 0);
1789 	if (urb == NULL)
1790 		return -ENOMEM;
1791 
1792 	urb->transfer_flags |= URB_ZERO_PACKET;
1793 
1794 	ep = usb_pipeendpoint(dev->out_pipe);
1795 	urb->pipe = dev->out_pipe;
1796 	retval = test_toggle_sync(dev, ep, urb);
1797 
1798 	simple_free_urb(urb);
1799 	return retval;
1800 }
1801 
1802 /*-------------------------------------------------------------------------*/
1803 
1804 /* Control OUT tests use the vendor control requests from Intel's
1805  * USB 2.0 compliance test device:  write a buffer, read it back.
1806  *
1807  * Intel's spec only _requires_ that it work for one packet, which
1808  * is pretty weak.   Some HCDs place limits here; most devices will
1809  * need to be able to handle more than one OUT data packet.  We'll
1810  * try whatever we're told to try.
1811  */
1812 static int ctrl_out(struct usbtest_dev *dev,
1813 		unsigned count, unsigned length, unsigned vary, unsigned offset)
1814 {
1815 	unsigned		i, j, len;
1816 	int			retval;
1817 	u8			*buf;
1818 	char			*what = "?";
1819 	struct usb_device	*udev;
1820 
1821 	if (length < 1 || length > 0xffff || vary >= length)
1822 		return -EINVAL;
1823 
1824 	buf = kmalloc(length + offset, GFP_KERNEL);
1825 	if (!buf)
1826 		return -ENOMEM;
1827 
1828 	buf += offset;
1829 	udev = testdev_to_usbdev(dev);
1830 	len = length;
1831 	retval = 0;
1832 
1833 	/* NOTE:  hardware might well act differently if we pushed it
1834 	 * with lots of back-to-back queued requests.
1835 	 */
1836 	for (i = 0; i < count; i++) {
1837 		/* write patterned data */
1838 		for (j = 0; j < len; j++)
1839 			buf[j] = (u8)(i + j);
1840 		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1841 				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1842 				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1843 		if (retval != len) {
1844 			what = "write";
1845 			if (retval >= 0) {
1846 				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1847 						retval, len);
1848 				retval = -EBADMSG;
1849 			}
1850 			break;
1851 		}
1852 
1853 		/* read it back -- assuming nothing intervened!!  */
1854 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1855 				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1856 				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1857 		if (retval != len) {
1858 			what = "read";
1859 			if (retval >= 0) {
1860 				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1861 						retval, len);
1862 				retval = -EBADMSG;
1863 			}
1864 			break;
1865 		}
1866 
1867 		/* fail if we can't verify */
1868 		for (j = 0; j < len; j++) {
1869 			if (buf[j] != (u8)(i + j)) {
1870 				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1871 					j, buf[j], (u8)(i + j));
1872 				retval = -EBADMSG;
1873 				break;
1874 			}
1875 		}
1876 		if (retval < 0) {
1877 			what = "verify";
1878 			break;
1879 		}
1880 
1881 		len += vary;
1882 
1883 		/* [real world] the "zero bytes IN" case isn't really used.
1884 		 * hardware can easily trip up in this weird case, since its
1885 		 * status stage is IN, not OUT like other ep0in transfers.
1886 		 */
1887 		if (len > length)
1888 			len = realworld ? 1 : 0;
1889 	}
1890 
1891 	if (retval < 0)
1892 		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1893 			what, retval, i);
1894 
1895 	kfree(buf - offset);
1896 	return retval;
1897 }
1898 
1899 /*-------------------------------------------------------------------------*/
1900 
1901 /* ISO/BULK tests ... mimics common usage
1902  *  - buffer length is split into N packets (mostly maxpacket sized)
1903  *  - multi-buffers according to sglen
1904  */
1905 
1906 struct transfer_context {
1907 	unsigned		count;
1908 	unsigned		pending;
1909 	spinlock_t		lock;
1910 	struct completion	done;
1911 	int			submit_error;
1912 	unsigned long		errors;
1913 	unsigned long		packet_count;
1914 	struct usbtest_dev	*dev;
1915 	bool			is_iso;
1916 };
1917 
1918 static void complicated_callback(struct urb *urb)
1919 {
1920 	struct transfer_context	*ctx = urb->context;
1921 	unsigned long flags;
1922 
1923 	spin_lock_irqsave(&ctx->lock, flags);
1924 	ctx->count--;
1925 
1926 	ctx->packet_count += urb->number_of_packets;
1927 	if (urb->error_count > 0)
1928 		ctx->errors += urb->error_count;
1929 	else if (urb->status != 0)
1930 		ctx->errors += (ctx->is_iso ? urb->number_of_packets : 1);
1931 	else if (urb->actual_length != urb->transfer_buffer_length)
1932 		ctx->errors++;
1933 	else if (check_guard_bytes(ctx->dev, urb) != 0)
1934 		ctx->errors++;
1935 
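	/* on success, resubmit this URB while at least as many transfers
	 * remain as there are URBs still in flight and no prior submission
	 * has failed
	 */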
1936 	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1937 			&& !ctx->submit_error) {
1938 		int status = usb_submit_urb(urb, GFP_ATOMIC);
1939 		switch (status) {
1940 		case 0:
1941 			goto done;
1942 		default:
1943 			dev_err(&ctx->dev->intf->dev,
1944 					"resubmit err %d\n",
1945 					status);
1946 			/* FALLTHROUGH */
1947 		case -ENODEV:			/* disconnected */
1948 		case -ESHUTDOWN:		/* endpoint disabled */
1949 			ctx->submit_error = 1;
1950 			break;
1951 		}
1952 	}
1953 
1954 	ctx->pending--;
1955 	if (ctx->pending == 0) {
1956 		if (ctx->errors)
1957 			dev_err(&ctx->dev->intf->dev,
1958 				"during the test, %lu errors out of %lu\n",
1959 				ctx->errors, ctx->packet_count);
1960 		complete(&ctx->done);
1961 	}
1962 done:
1963 	spin_unlock_irqrestore(&ctx->lock, flags);
1964 }
1965 
1966 static struct urb *iso_alloc_urb(
1967 	struct usb_device	*udev,
1968 	int			pipe,
1969 	struct usb_endpoint_descriptor	*desc,
1970 	long			bytes,
1971 	unsigned offset
1972 )
1973 {
1974 	struct urb		*urb;
1975 	unsigned		i, maxp, packets;
1976 
1977 	if (bytes < 0 || !desc)
1978 		return NULL;
1979 	maxp = usb_endpoint_maxp(desc);
1980 	maxp *= usb_endpoint_maxp_mult(desc);
1981 	packets = DIV_ROUND_UP(bytes, maxp);
1982 
1983 	urb = usb_alloc_urb(packets, GFP_KERNEL);
1984 	if (!urb)
1985 		return urb;
1986 	urb->dev = udev;
1987 	urb->pipe = pipe;
1988 
1989 	urb->number_of_packets = packets;
1990 	urb->transfer_buffer_length = bytes;
1991 	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1992 							GFP_KERNEL,
1993 							&urb->transfer_dma);
1994 	if (!urb->transfer_buffer) {
1995 		usb_free_urb(urb);
1996 		return NULL;
1997 	}
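	/* bytes in front of an offset (unaligned) payload are filled with
	 * GUARD_BYTE so corruption below the buffer can be detected later
	 */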
1998 	if (offset) {
1999 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
2000 		urb->transfer_buffer += offset;
2001 		urb->transfer_dma += offset;
2002 	}
2003 	/* For inbound transfers use a guard byte so that the test fails
2004 	 * if the data is not copied correctly. */
2005 	memset(urb->transfer_buffer,
2006 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
2007 			bytes);
2008 
2009 	for (i = 0; i < packets; i++) {
2010 		/* here, only the last packet will be short */
2011 		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
2012 		bytes -= urb->iso_frame_desc[i].length;
2013 
2014 		urb->iso_frame_desc[i].offset = maxp * i;
2015 	}
2016 
2017 	urb->complete = complicated_callback;
2018 	/* urb->context = SET BY CALLER */
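	/* bInterval uses the USB 2.0 exponential encoding:
	 * period = 2^(bInterval-1) (micro)frames
	 */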
2019 	urb->interval = 1 << (desc->bInterval - 1);
2020 	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
2021 	return urb;
2022 }
2023 
2024 static int
2025 test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
2026 		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
2027 {
2028 	struct transfer_context	context;
2029 	struct usb_device	*udev;
2030 	unsigned		i;
2031 	unsigned long		packets = 0;
2032 	int			status = 0;
2033 	struct urb		*urbs[MAX_SGLEN];
2034 
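	/* context.count below is iterations * sglen; reject values that
	 * would overflow that product
	 */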
2035 	if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
2036 		return -EINVAL;
2037 
2038 	if (param->sglen > MAX_SGLEN)
2039 		return -EINVAL;
2040 
2041 	memset(&context, 0, sizeof(context));
2042 	context.count = param->iterations * param->sglen;
2043 	context.dev = dev;
2044 	context.is_iso = !!desc;
2045 	init_completion(&context.done);
2046 	spin_lock_init(&context.lock);
2047 
2048 	udev = testdev_to_usbdev(dev);
2049 
2050 	for (i = 0; i < param->sglen; i++) {
2051 		if (context.is_iso)
2052 			urbs[i] = iso_alloc_urb(udev, pipe, desc,
2053 					param->length, offset);
2054 		else
2055 			urbs[i] = complicated_alloc_urb(udev, pipe,
2056 					param->length, 0);
2057 
2058 		if (!urbs[i]) {
2059 			status = -ENOMEM;
2060 			goto fail;
2061 		}
2062 		packets += urbs[i]->number_of_packets;
2063 		urbs[i]->context = &context;
2064 	}
2065 	packets *= param->iterations;
2066 
2067 	if (context.is_iso) {
2068 		dev_info(&dev->intf->dev,
2069 			"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
2070 			1 << (desc->bInterval - 1),
2071 			(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
2072 			usb_endpoint_maxp(desc),
2073 			usb_endpoint_maxp_mult(desc));
2074 
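		/* total time: packets * period, with 8 microframes per
		 * millisecond at high speed
		 */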
2075 		dev_info(&dev->intf->dev,
2076 			"total %lu msec (%lu packets)\n",
2077 			(packets * (1 << (desc->bInterval - 1)))
2078 				/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
2079 			packets);
2080 	}
2081 
2082 	spin_lock_irq(&context.lock);
2083 	for (i = 0; i < param->sglen; i++) {
2084 		++context.pending;
2085 		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
2086 		if (status < 0) {
2087 			ERROR(dev, "submit iso[%d], error %d\n", i, status);
2088 			if (i == 0) {
2089 				spin_unlock_irq(&context.lock);
2090 				goto fail;
2091 			}
2092 
2093 			simple_free_urb(urbs[i]);
2094 			urbs[i] = NULL;
2095 			context.pending--;
2096 			context.submit_error = 1;
2097 			break;
2098 		}
2099 	}
2100 	spin_unlock_irq(&context.lock);
2101 
2102 	wait_for_completion(&context.done);
2103 
2104 	for (i = 0; i < param->sglen; i++) {
2105 		if (urbs[i])
2106 			simple_free_urb(urbs[i]);
2107 	}
2108 	/*
2109 	 * Isochronous transfers are expected to fail sometimes.  As an
2110 	 * arbitrary limit, we will report an error if any submissions
2111 	 * fail or if the transfer failure rate is > 10%.
2112 	 */
2113 	if (status != 0)
2114 		;
2115 	else if (context.submit_error)
2116 		status = -EACCES;
2117 	else if (context.errors >
2118 			(context.is_iso ? context.packet_count / 10 : 0))
2119 		status = -EIO;
2120 	return status;
2121 
2122 fail:
2123 	for (i = 0; i < param->sglen; i++) {
2124 		if (urbs[i])
2125 			simple_free_urb(urbs[i]);
2126 	}
2127 	return status;
2128 }
2129 
2130 static int test_unaligned_bulk(
2131 	struct usbtest_dev *tdev,
2132 	int pipe,
2133 	unsigned length,
2134 	int iterations,
2135 	unsigned transfer_flags,
2136 	const char *label)
2137 {
2138 	int retval;
2139 	struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev),
2140 			pipe, length, transfer_flags, 1, 0, simple_callback);
2141 
2142 	if (!urb)
2143 		return -ENOMEM;
2144 
2145 	retval = simple_io(tdev, urb, iterations, 0, 0, label);
2146 	simple_free_urb(urb);
2147 	return retval;
2148 }
2149 
2150 /* Run tests. */
2151 static int
2152 usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
2153 {
2154 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2155 	struct usb_device	*udev = testdev_to_usbdev(dev);
2156 	struct urb		*urb;
2157 	struct scatterlist	*sg;
2158 	struct usb_sg_request	req;
2159 	unsigned		i;
2160 	int	retval = -EOPNOTSUPP;
2161 
2162 	if (param->iterations <= 0)
2163 		return -EINVAL;
2164 	if (param->sglen > MAX_SGLEN)
2165 		return -EINVAL;
2166 	/*
2167 	 * Just a bunch of test cases that every HCD is expected to handle.
2168 	 *
2169 	 * Some may need specific firmware, though it'd be good to have
2170 	 * one firmware image to handle all the test cases.
2171 	 *
2172 	 * FIXME add more tests!  cancel requests, verify the data, control
2173 	 * queueing, concurrent read+write threads, and so on.
2174 	 */
2175 	switch (param->test_num) {
2176 
2177 	case 0:
2178 		dev_info(&intf->dev, "TEST 0:  NOP\n");
2179 		retval = 0;
2180 		break;
2181 
2182 	/* Simple non-queued bulk I/O tests */
2183 	case 1:
2184 		if (dev->out_pipe == 0)
2185 			break;
2186 		dev_info(&intf->dev,
2187 				"TEST 1:  write %d bytes %u times\n",
2188 				param->length, param->iterations);
2189 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
2190 		if (!urb) {
2191 			retval = -ENOMEM;
2192 			break;
2193 		}
2194 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2195 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
2196 		simple_free_urb(urb);
2197 		break;
2198 	case 2:
2199 		if (dev->in_pipe == 0)
2200 			break;
2201 		dev_info(&intf->dev,
2202 				"TEST 2:  read %d bytes %u times\n",
2203 				param->length, param->iterations);
2204 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
2205 		if (!urb) {
2206 			retval = -ENOMEM;
2207 			break;
2208 		}
2209 		/* FIRMWARE:  bulk source (maybe generates short writes) */
2210 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
2211 		simple_free_urb(urb);
2212 		break;
2213 	case 3:
2214 		if (dev->out_pipe == 0 || param->vary == 0)
2215 			break;
2216 		dev_info(&intf->dev,
2217 				"TEST 3:  write/%d 0..%d bytes %u times\n",
2218 				param->vary, param->length, param->iterations);
2219 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
2220 		if (!urb) {
2221 			retval = -ENOMEM;
2222 			break;
2223 		}
2224 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2225 		retval = simple_io(dev, urb, param->iterations, param->vary,
2226 					0, "test3");
2227 		simple_free_urb(urb);
2228 		break;
2229 	case 4:
2230 		if (dev->in_pipe == 0 || param->vary == 0)
2231 			break;
2232 		dev_info(&intf->dev,
2233 				"TEST 4:  read/%d 0..%d bytes %u times\n",
2234 				param->vary, param->length, param->iterations);
2235 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
2236 		if (!urb) {
2237 			retval = -ENOMEM;
2238 			break;
2239 		}
2240 		/* FIRMWARE:  bulk source (maybe generates short writes) */
2241 		retval = simple_io(dev, urb, param->iterations, param->vary,
2242 					0, "test4");
2243 		simple_free_urb(urb);
2244 		break;
2245 
2246 	/* Queued bulk I/O tests */
2247 	case 5:
2248 		if (dev->out_pipe == 0 || param->sglen == 0)
2249 			break;
2250 		dev_info(&intf->dev,
2251 			"TEST 5:  write %d sglists %d entries of %d bytes\n",
2252 				param->iterations,
2253 				param->sglen, param->length);
2254 		sg = alloc_sglist(param->sglen, param->length,
2255 				0, dev, dev->out_pipe);
2256 		if (!sg) {
2257 			retval = -ENOMEM;
2258 			break;
2259 		}
2260 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2261 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2262 				&req, sg, param->sglen);
2263 		free_sglist(sg, param->sglen);
2264 		break;
2265 
2266 	case 6:
2267 		if (dev->in_pipe == 0 || param->sglen == 0)
2268 			break;
2269 		dev_info(&intf->dev,
2270 			"TEST 6:  read %d sglists %d entries of %d bytes\n",
2271 				param->iterations,
2272 				param->sglen, param->length);
2273 		sg = alloc_sglist(param->sglen, param->length,
2274 				0, dev, dev->in_pipe);
2275 		if (!sg) {
2276 			retval = -ENOMEM;
2277 			break;
2278 		}
2279 		/* FIRMWARE:  bulk source (maybe generates short writes) */
2280 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2281 				&req, sg, param->sglen);
2282 		free_sglist(sg, param->sglen);
2283 		break;
2284 	case 7:
2285 		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
2286 			break;
2287 		dev_info(&intf->dev,
2288 			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
2289 				param->vary, param->iterations,
2290 				param->sglen, param->length);
2291 		sg = alloc_sglist(param->sglen, param->length,
2292 				param->vary, dev, dev->out_pipe);
2293 		if (!sg) {
2294 			retval = -ENOMEM;
2295 			break;
2296 		}
2297 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2298 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2299 				&req, sg, param->sglen);
2300 		free_sglist(sg, param->sglen);
2301 		break;
2302 	case 8:
2303 		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
2304 			break;
2305 		dev_info(&intf->dev,
2306 			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
2307 				param->vary, param->iterations,
2308 				param->sglen, param->length);
2309 		sg = alloc_sglist(param->sglen, param->length,
2310 				param->vary, dev, dev->in_pipe);
2311 		if (!sg) {
2312 			retval = -ENOMEM;
2313 			break;
2314 		}
2315 		/* FIRMWARE:  bulk source (maybe generates short writes) */
2316 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2317 				&req, sg, param->sglen);
2318 		free_sglist(sg, param->sglen);
2319 		break;
2320 
2321 	/* non-queued sanity tests for control (chapter 9 subset) */
2322 	case 9:
2323 		retval = 0;
2324 		dev_info(&intf->dev,
2325 			"TEST 9:  ch9 (subset) control tests, %d times\n",
2326 				param->iterations);
2327 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2328 			retval = ch9_postconfig(dev);
2329 		if (retval)
2330 			dev_err(&intf->dev, "ch9 subset failed, "
2331 					"iterations left %d\n", i);
2332 		break;
2333 
2334 	/* queued control messaging */
2335 	case 10:
2336 		retval = 0;
2337 		dev_info(&intf->dev,
2338 				"TEST 10:  queue %d control calls, %d times\n",
2339 				param->sglen,
2340 				param->iterations);
2341 		retval = test_ctrl_queue(dev, param);
2342 		break;
2343 
2344 	/* simple non-queued unlinks (ring with one urb) */
2345 	case 11:
2346 		if (dev->in_pipe == 0 || !param->length)
2347 			break;
2348 		retval = 0;
2349 		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
2350 				param->iterations, param->length);
2351 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2352 			retval = unlink_simple(dev, dev->in_pipe,
2353 						param->length);
2354 		if (retval)
2355 			dev_err(&intf->dev, "unlink reads failed %d, "
2356 				"iterations left %d\n", retval, i);
2357 		break;
2358 	case 12:
2359 		if (dev->out_pipe == 0 || !param->length)
2360 			break;
2361 		retval = 0;
2362 		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
2363 				param->iterations, param->length);
2364 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2365 			retval = unlink_simple(dev, dev->out_pipe,
2366 						param->length);
2367 		if (retval)
2368 			dev_err(&intf->dev, "unlink writes failed %d, "
2369 				"iterations left %d\n", retval, i);
2370 		break;
2371 
2372 	/* ep halt tests */
2373 	case 13:
2374 		if (dev->out_pipe == 0 && dev->in_pipe == 0)
2375 			break;
2376 		retval = 0;
2377 		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
2378 				param->iterations);
2379 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2380 			retval = halt_simple(dev);
2381 
2382 		if (retval)
2383 			ERROR(dev, "halts failed, iterations left %d\n", i);
2384 		break;
2385 
2386 	/* control write tests */
2387 	case 14:
2388 		if (!dev->info->ctrl_out)
2389 			break;
2390 		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
2391 				param->iterations,
2392 				realworld ? 1 : 0, param->length,
2393 				param->vary);
2394 		retval = ctrl_out(dev, param->iterations,
2395 				param->length, param->vary, 0);
2396 		break;
2397 
2398 	/* iso write tests */
2399 	case 15:
2400 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2401 			break;
2402 		dev_info(&intf->dev,
2403 			"TEST 15:  write %d iso, %d entries of %d bytes\n",
2404 				param->iterations,
2405 				param->sglen, param->length);
2406 		/* FIRMWARE:  iso sink */
2407 		retval = test_queue(dev, param,
2408 				dev->out_iso_pipe, dev->iso_out, 0);
2409 		break;
2410 
2411 	/* iso read tests */
2412 	case 16:
2413 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2414 			break;
2415 		dev_info(&intf->dev,
2416 			"TEST 16:  read %d iso, %d entries of %d bytes\n",
2417 				param->iterations,
2418 				param->sglen, param->length);
2419 		/* FIRMWARE:  iso source */
2420 		retval = test_queue(dev, param,
2421 				dev->in_iso_pipe, dev->iso_in, 0);
2422 		break;
2423 
2424 	/* FIXME scatterlist cancel (needs helper thread) */
2425 
2426 	/* Tests for bulk I/O using DMA mapping by core and odd address */
2427 	case 17:
2428 		if (dev->out_pipe == 0)
2429 			break;
2430 		dev_info(&intf->dev,
2431 			"TEST 17:  write odd addr %d bytes %u times core map\n",
2432 			param->length, param->iterations);
2433 
2434 		retval = test_unaligned_bulk(
2435 				dev, dev->out_pipe,
2436 				param->length, param->iterations,
2437 				0, "test17");
2438 		break;
2439 
2440 	case 18:
2441 		if (dev->in_pipe == 0)
2442 			break;
2443 		dev_info(&intf->dev,
2444 			"TEST 18:  read odd addr %d bytes %u times core map\n",
2445 			param->length, param->iterations);
2446 
2447 		retval = test_unaligned_bulk(
2448 				dev, dev->in_pipe,
2449 				param->length, param->iterations,
2450 				0, "test18");
2451 		break;
2452 
2453 	/* Tests for bulk I/O using premapped coherent buffer and odd address */
2454 	case 19:
2455 		if (dev->out_pipe == 0)
2456 			break;
2457 		dev_info(&intf->dev,
2458 			"TEST 19:  write odd addr %d bytes %u times premapped\n",
2459 			param->length, param->iterations);
2460 
2461 		retval = test_unaligned_bulk(
2462 				dev, dev->out_pipe,
2463 				param->length, param->iterations,
2464 				URB_NO_TRANSFER_DMA_MAP, "test19");
2465 		break;
2466 
2467 	case 20:
2468 		if (dev->in_pipe == 0)
2469 			break;
2470 		dev_info(&intf->dev,
2471 			"TEST 20:  read odd addr %d bytes %u times premapped\n",
2472 			param->length, param->iterations);
2473 
2474 		retval = test_unaligned_bulk(
2475 				dev, dev->in_pipe,
2476 				param->length, param->iterations,
2477 				URB_NO_TRANSFER_DMA_MAP, "test20");
2478 		break;
2479 
2480 	/* control write tests with unaligned buffer */
2481 	case 21:
2482 		if (!dev->info->ctrl_out)
2483 			break;
2484 		dev_info(&intf->dev,
2485 				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
2486 				param->iterations,
2487 				realworld ? 1 : 0, param->length,
2488 				param->vary);
2489 		retval = ctrl_out(dev, param->iterations,
2490 				param->length, param->vary, 1);
2491 		break;
2492 
2493 	/* unaligned iso tests */
2494 	case 22:
2495 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2496 			break;
2497 		dev_info(&intf->dev,
2498 			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
2499 				param->iterations,
2500 				param->sglen, param->length);
2501 		retval = test_queue(dev, param,
2502 				dev->out_iso_pipe, dev->iso_out, 1);
2503 		break;
2504 
2505 	case 23:
2506 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2507 			break;
2508 		dev_info(&intf->dev,
2509 			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
2510 				param->iterations,
2511 				param->sglen, param->length);
2512 		retval = test_queue(dev, param,
2513 				dev->in_iso_pipe, dev->iso_in, 1);
2514 		break;
2515 
2516 	/* unlink URBs from a bulk-OUT queue */
2517 	case 24:
2518 		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2519 			break;
2520 		retval = 0;
2521 		dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
2522 				"%d %d-byte writes\n",
2523 				param->iterations, param->sglen, param->length);
2524 		for (i = param->iterations; retval == 0 && i > 0; --i) {
2525 			retval = unlink_queued(dev, dev->out_pipe,
2526 						param->sglen, param->length);
2527 			if (retval) {
2528 				dev_err(&intf->dev,
2529 					"unlink queued writes failed %d, "
2530 					"iterations left %d\n", retval, i);
2531 				break;
2532 			}
2533 		}
2534 		break;
2535 
2536 	/* Simple non-queued interrupt I/O tests */
2537 	case 25:
2538 		if (dev->out_int_pipe == 0)
2539 			break;
2540 		dev_info(&intf->dev,
2541 				"TEST 25: write %d bytes %u times\n",
2542 				param->length, param->iterations);
2543 		urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length,
2544 				dev->int_out->bInterval);
2545 		if (!urb) {
2546 			retval = -ENOMEM;
2547 			break;
2548 		}
2549 		/* FIRMWARE: interrupt sink (maybe accepts short writes) */
2550 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test25");
2551 		simple_free_urb(urb);
2552 		break;
2553 	case 26:
2554 		if (dev->in_int_pipe == 0)
2555 			break;
2556 		dev_info(&intf->dev,
2557 				"TEST 26: read %d bytes %u times\n",
2558 				param->length, param->iterations);
2559 		urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length,
2560 				dev->int_in->bInterval);
2561 		if (!urb) {
2562 			retval = -ENOMEM;
2563 			break;
2564 		}
2565 		/* FIRMWARE: interrupt source (maybe generates short writes) */
2566 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test26");
2567 		simple_free_urb(urb);
2568 		break;
2569 	case 27:
2570 		/* performance test, so require pattern 0 and skip data comparison */
2571 		if (dev->out_pipe == 0 || param->sglen == 0 || pattern != 0)
2572 			break;
2573 		dev_info(&intf->dev,
2574 			"TEST 27: bulk write %dMbytes\n", (param->iterations *
2575 			param->sglen * param->length) / (1024 * 1024));
2576 		retval = test_queue(dev, param,
2577 				dev->out_pipe, NULL, 0);
2578 		break;
2579 	case 28:
2580 		if (dev->in_pipe == 0 || param->sglen == 0 || pattern != 0)
2581 			break;
2582 		dev_info(&intf->dev,
2583 			"TEST 28: bulk read %dMbytes\n", (param->iterations *
2584 			param->sglen * param->length) / (1024 * 1024));
2585 		retval = test_queue(dev, param,
2586 				dev->in_pipe, NULL, 0);
2587 		break;
2588 	/* Test data toggle/seq_nr clearing between bulk OUT transfers */
2589 	case 29:
2590 		if (dev->out_pipe == 0)
2591 			break;
2592 		retval = 0;
2593 		dev_info(&intf->dev, "TEST 29: Clear toggle between bulk writes %d times\n",
2594 				param->iterations);
2595 		for (i = param->iterations; retval == 0 && i > 0; --i)
2596 			retval = toggle_sync_simple(dev);
2597 
2598 		if (retval)
2599 			ERROR(dev, "toggle sync failed, iterations left %d\n",
2600 			      i);
2601 		break;
2602 	}
2603 	return retval;
2604 }
2605 
2606 /*-------------------------------------------------------------------------*/
2607 
2608 /* We only have this one interface to user space, through usbfs.
2609  * User mode code can scan usbfs to find N different devices (maybe on
2610  * different busses) to use when testing, and allocate one thread per
2611  * test.  So discovery is simplified, and we have no device naming issues.
2612  *
2613  * Don't use these only as stress/load tests.  Use them along with
2614  * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
2615  * video capture, and so on.  Run different tests at different times, in
2616  * different sequences.  Nothing here should interact with other devices,
2617  * except indirectly by consuming USB bandwidth and CPU resources for test
2618  * threads and request completion.  But the only way to know that for sure
2619  * is to test when HC queues are in use by many devices.
2620  *
2621  * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
2622  * it locks out usbcore in certain code paths.  Notably, if you disconnect
2623  * the device-under-test, hub_wq will block forever waiting for the
2624  * ioctl to complete ... so that usb_disconnect() can abort the pending
2625  * urbs and then call usbtest_disconnect().  To abort a test, you're best
2626  * off just killing the userspace task and waiting for it to exit.
2627  */
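/* A minimal user-space sketch of starting a test through usbfs, modeled on
 * tools/usb/testusb.c; the device path and interface number are placeholders:
 *
 *	struct usbtest_param_32 param = {
 *		.test_num = 1, .iterations = 1000, .length = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno = 0,
 *		.ioctl_code = USBTEST_REQUEST_32,
 *		.data = &param,
 *	};
 *	int fd = open("/dev/bus/usb/BBB/DDD", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("usbtest");
 */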
2628 
2629 static int
2630 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
2631 {
2632 
2633 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2634 	struct usbtest_param_64 *param_64 = buf;
2635 	struct usbtest_param_32 temp;
2636 	struct usbtest_param_32 *param_32 = buf;
2637 	struct timespec64 start;
2638 	struct timespec64 end;
2639 	struct timespec64 duration;
2640 	int retval = -EOPNOTSUPP;
2641 
2642 	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
2643 
2644 	pattern = mod_pattern;
2645 
2646 	if (mutex_lock_interruptible(&dev->lock))
2647 		return -ERESTARTSYS;
2648 
2649 	/* FIXME: What if a system sleep starts while a test is running? */
2650 
2651 	/* some devices, like ez-usb default devices, need a non-default
2652 	 * altsetting to have any active endpoints.  some tests change
2653 	 * altsettings; force a default so most tests don't need to check.
2654 	 */
2655 	if (dev->info->alt >= 0) {
2656 		if (intf->altsetting->desc.bInterfaceNumber) {
2657 			retval = -ENODEV;
2658 			goto free_mutex;
2659 		}
2660 		retval = set_altsetting(dev, dev->info->alt);
2661 		if (retval) {
2662 			dev_err(&intf->dev,
2663 					"set altsetting to %d failed, %d\n",
2664 					dev->info->alt, retval);
2665 			goto free_mutex;
2666 		}
2667 	}
2668 
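	/* convert the 64-bit parameter layout used by old 64-bit binaries
	 * into the 32-bit layout that usbtest_do_ioctl() consumes
	 */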
2669 	switch (code) {
2670 	case USBTEST_REQUEST_64:
2671 		temp.test_num = param_64->test_num;
2672 		temp.iterations = param_64->iterations;
2673 		temp.length = param_64->length;
2674 		temp.sglen = param_64->sglen;
2675 		temp.vary = param_64->vary;
2676 		param_32 = &temp;
2677 		break;
2678 
2679 	case USBTEST_REQUEST_32:
2680 		break;
2681 
2682 	default:
2683 		retval = -EOPNOTSUPP;
2684 		goto free_mutex;
2685 	}
2686 
2687 	ktime_get_ts64(&start);
2688 
2689 	retval = usbtest_do_ioctl(intf, param_32);
2690 	if (retval < 0)
2691 		goto free_mutex;
2692 
2693 	ktime_get_ts64(&end);
2694 
2695 	duration = timespec64_sub(end, start);
2696 
2697 	temp.duration_sec = duration.tv_sec;
2698 	temp.duration_usec = duration.tv_nsec/NSEC_PER_USEC;
2699 
2700 	switch (code) {
2701 	case USBTEST_REQUEST_32:
2702 		param_32->duration_sec = temp.duration_sec;
2703 		param_32->duration_usec = temp.duration_usec;
2704 		break;
2705 
2706 	case USBTEST_REQUEST_64:
2707 		param_64->duration_sec = temp.duration_sec;
2708 		param_64->duration_usec = temp.duration_usec;
2709 		break;
2710 	}
2711 
2712 free_mutex:
2713 	mutex_unlock(&dev->lock);
2714 	return retval;
2715 }
2716 
2717 /*-------------------------------------------------------------------------*/
2718 
2719 static unsigned force_interrupt;
2720 module_param(force_interrupt, uint, 0);
2721 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2722 
2723 #ifdef	GENERIC
2724 static unsigned short vendor;
2725 module_param(vendor, ushort, 0);
2726 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2727 
2728 static unsigned short product;
2729 module_param(product, ushort, 0);
2730 MODULE_PARM_DESC(product, "product code (from vendor)");
2731 #endif
2732 
2733 static int
2734 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2735 {
2736 	struct usb_device	*udev;
2737 	struct usbtest_dev	*dev;
2738 	struct usbtest_info	*info;
2739 	char			*rtest, *wtest;
2740 	char			*irtest, *iwtest;
2741 	char			*intrtest, *intwtest;
2742 
2743 	udev = interface_to_usbdev(intf);
2744 
2745 #ifdef	GENERIC
2746 	/* specify devices by module parameters? */
2747 	if (id->match_flags == 0) {
2748 		/* vendor match required, product match optional */
2749 		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2750 			return -ENODEV;
2751 		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2752 			return -ENODEV;
2753 		dev_info(&intf->dev, "matched module params, "
2754 					"vend=0x%04x prod=0x%04x\n",
2755 				le16_to_cpu(udev->descriptor.idVendor),
2756 				le16_to_cpu(udev->descriptor.idProduct));
2757 	}
2758 #endif
2759 
2760 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2761 	if (!dev)
2762 		return -ENOMEM;
2763 	info = (struct usbtest_info *) id->driver_info;
2764 	dev->info = info;
2765 	mutex_init(&dev->lock);
2766 
2767 	dev->intf = intf;
2768 
2769 	/* cacheline-aligned scratch for i/o */
2770 	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2771 	if (dev->buf == NULL) {
2772 		kfree(dev);
2773 		return -ENOMEM;
2774 	}
2775 
2776 	/* NOTE this doesn't yet test the handful of differences that are
2777 	 * visible with high speed interrupts:  bigger maxpacket (1K) and
2778 	 * "high bandwidth" modes (up to 3 packets/uframe).
2779 	 */
2780 	rtest = wtest = "";
2781 	irtest = iwtest = "";
2782 	intrtest = intwtest = "";
2783 	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2784 		if (info->ep_in) {
2785 			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2786 			rtest = " intr-in";
2787 		}
2788 		if (info->ep_out) {
2789 			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2790 			wtest = " intr-out";
2791 		}
2792 	} else {
2793 		if (override_alt >= 0 || info->autoconf) {
2794 			int status;
2795 
2796 			status = get_endpoints(dev, intf);
2797 			if (status < 0) {
2798 				WARNING(dev, "couldn't get endpoints, %d\n",
2799 						status);
2800 				kfree(dev->buf);
2801 				kfree(dev);
2802 				return status;
2803 			}
2804 			/* may find bulk or ISO pipes */
2805 		} else {
2806 			if (info->ep_in)
2807 				dev->in_pipe = usb_rcvbulkpipe(udev,
2808 							info->ep_in);
2809 			if (info->ep_out)
2810 				dev->out_pipe = usb_sndbulkpipe(udev,
2811 							info->ep_out);
2812 		}
2813 		if (dev->in_pipe)
2814 			rtest = " bulk-in";
2815 		if (dev->out_pipe)
2816 			wtest = " bulk-out";
2817 		if (dev->in_iso_pipe)
2818 			irtest = " iso-in";
2819 		if (dev->out_iso_pipe)
2820 			iwtest = " iso-out";
2821 		if (dev->in_int_pipe)
2822 			intrtest = " int-in";
2823 		if (dev->out_int_pipe)
2824 			intwtest = " int-out";
2825 	}
2826 
2827 	usb_set_intfdata(intf, dev);
2828 	dev_info(&intf->dev, "%s\n", info->name);
2829 	dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n",
2830 			usb_speed_string(udev->speed),
2831 			info->ctrl_out ? " in/out" : "",
2832 			rtest, wtest,
2833 			irtest, iwtest,
2834 			intrtest, intwtest,
2835 			info->alt >= 0 ? " (+alt)" : "");
2836 	return 0;
2837 }
2838 
2839 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2840 {
2841 	return 0;
2842 }
2843 
2844 static int usbtest_resume(struct usb_interface *intf)
2845 {
2846 	return 0;
2847 }
2848 
2849 
2850 static void usbtest_disconnect(struct usb_interface *intf)
2851 {
2852 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2853 
2854 	usb_set_intfdata(intf, NULL);
2855 	dev_dbg(&intf->dev, "disconnect\n");
2856 	kfree(dev);
2857 }
2858 
2859 /* Basic testing only needs a device that can source or sink bulk traffic.
2860  * Any device can test control transfers (default with GENERIC binding).
2861  *
2862  * Several entries work with the default EP0 implementation that's built
2863  * into EZ-USB chips.  There's a default vendor ID which can be overridden
2864  * by (very) small config EEPROMS, but otherwise all these devices act
2865  * identically until firmware is loaded:  only EP0 works.  It turns out
2866  * to be easy to make other endpoints work, without modifying that EP0
2867  * behavior.  For now, we expect that kind of firmware.
2868  */
2869 
2870 /* an21xx or fx versions of ez-usb */
2871 static struct usbtest_info ez1_info = {
2872 	.name		= "EZ-USB device",
2873 	.ep_in		= 2,
2874 	.ep_out		= 2,
2875 	.alt		= 1,
2876 };
2877 
2878 /* fx2 version of ez-usb */
2879 static struct usbtest_info ez2_info = {
2880 	.name		= "FX2 device",
2881 	.ep_in		= 6,
2882 	.ep_out		= 2,
2883 	.alt		= 1,
2884 };
2885 
2886 /* ezusb family device with dedicated usb test firmware
2887  */
2888 static struct usbtest_info fw_info = {
2889 	.name		= "usb test device",
2890 	.ep_in		= 2,
2891 	.ep_out		= 2,
2892 	.alt		= 1,
2893 	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
2894 	.ctrl_out	= 1,
2895 	.iso		= 1,		/* iso_ep's are #8 in/out */
2896 };
2897 
2898 /* peripheral running Linux and 'zero.c' test firmware, or
2899  * its user-mode cousin. different versions of this use
2900  * different hardware with the same vendor/product codes.
2901  * host side MUST rely on the endpoint descriptors.
2902  */
2903 static struct usbtest_info gz_info = {
2904 	.name		= "Linux gadget zero",
2905 	.autoconf	= 1,
2906 	.ctrl_out	= 1,
2907 	.iso		= 1,
2908 	.intr		= 1,
2909 	.alt		= 0,
2910 };
2911 
2912 static struct usbtest_info um_info = {
2913 	.name		= "Linux user mode test driver",
2914 	.autoconf	= 1,
2915 	.alt		= -1,
2916 };
2917 
2918 static struct usbtest_info um2_info = {
2919 	.name		= "Linux user mode ISO test driver",
2920 	.autoconf	= 1,
2921 	.iso		= 1,
2922 	.alt		= -1,
2923 };
2924 
2925 #ifdef IBOT2
2926 /* this is a nice source of high speed bulk data;
2927  * uses an FX2, with firmware provided in the device
2928  */
2929 static struct usbtest_info ibot2_info = {
2930 	.name		= "iBOT2 webcam",
2931 	.ep_in		= 2,
2932 	.alt		= -1,
2933 };
2934 #endif
2935 
2936 #ifdef GENERIC
2937 /* we can use any device to test control traffic */
2938 static struct usbtest_info generic_info = {
2939 	.name		= "Generic USB device",
2940 	.alt		= -1,
2941 };
2942 #endif
2943 
2944 
2945 static const struct usb_device_id id_table[] = {
2946 
2947 	/*-------------------------------------------------------------*/
2948 
2949 	/* EZ-USB devices which download firmware to replace (or in our
2950 	 * case augment) the default device implementation.
2951 	 */
2952 
2953 	/* generic EZ-USB FX controller */
2954 	{ USB_DEVICE(0x0547, 0x2235),
2955 		.driver_info = (unsigned long) &ez1_info,
2956 	},
2957 
2958 	/* CY3671 development board with EZ-USB FX */
2959 	{ USB_DEVICE(0x0547, 0x0080),
2960 		.driver_info = (unsigned long) &ez1_info,
2961 	},
2962 
2963 	/* generic EZ-USB FX2 controller (or development board) */
2964 	{ USB_DEVICE(0x04b4, 0x8613),
2965 		.driver_info = (unsigned long) &ez2_info,
2966 	},
2967 
2968 	/* re-enumerated usb test device firmware */
2969 	{ USB_DEVICE(0xfff0, 0xfff0),
2970 		.driver_info = (unsigned long) &fw_info,
2971 	},
2972 
2973 	/* "Gadget Zero" firmware runs under Linux */
2974 	{ USB_DEVICE(0x0525, 0xa4a0),
2975 		.driver_info = (unsigned long) &gz_info,
2976 	},
2977 
2978 	/* so does a user-mode variant */
2979 	{ USB_DEVICE(0x0525, 0xa4a4),
2980 		.driver_info = (unsigned long) &um_info,
2981 	},
2982 
2983 	/* ... and a user-mode variant that talks iso */
2984 	{ USB_DEVICE(0x0525, 0xa4a3),
2985 		.driver_info = (unsigned long) &um2_info,
2986 	},
2987 
2988 #ifdef KEYSPAN_19Qi
2989 	/* Keyspan 19qi uses an21xx (original EZ-USB) */
2990 	/* this does not coexist with the real Keyspan 19qi driver! */
2991 	{ USB_DEVICE(0x06cd, 0x010b),
2992 		.driver_info = (unsigned long) &ez1_info,
2993 	},
2994 #endif
2995 
2996 	/*-------------------------------------------------------------*/
2997 
2998 #ifdef IBOT2
2999 	/* iBOT2 makes a nice source of high speed bulk-in data */
3000 	/* this does not coexist with a real iBOT2 driver! */
3001 	{ USB_DEVICE(0x0b62, 0x0059),
3002 		.driver_info = (unsigned long) &ibot2_info,
3003 	},
3004 #endif
3005 
3006 	/*-------------------------------------------------------------*/
3007 
3008 #ifdef GENERIC
3009 	/* module params can specify devices to use for control tests */
3010 	{ .driver_info = (unsigned long) &generic_info, },
3011 #endif
3012 
3013 	/*-------------------------------------------------------------*/
3014 
3015 	{ }
3016 };
3017 MODULE_DEVICE_TABLE(usb, id_table);
3018 
3019 static struct usb_driver usbtest_driver = {
3020 	.name =		"usbtest",
3021 	.id_table =	id_table,
3022 	.probe =	usbtest_probe,
3023 	.unlocked_ioctl = usbtest_ioctl,
3024 	.disconnect =	usbtest_disconnect,
3025 	.suspend =	usbtest_suspend,
3026 	.resume =	usbtest_resume,
3027 };
3028 
3029 /*-------------------------------------------------------------------------*/
3030 
3031 static int __init usbtest_init(void)
3032 {
3033 #ifdef GENERIC
3034 	if (vendor)
3035 		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
3036 #endif
3037 	return usb_register(&usbtest_driver);
3038 }
3039 module_init(usbtest_init);
3040 
3041 static void __exit usbtest_exit(void)
3042 {
3043 	usb_deregister(&usbtest_driver);
3044 }
3045 module_exit(usbtest_exit);
3046 
3047 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
3048 MODULE_LICENSE("GPL");
3049 
3050