xref: /freebsd/sys/dev/usb/usb_transfer.c (revision b1f92fa22938fe29ab7e53692ffe0ed7a0ecc4d0)
1 /* $FreeBSD$ */
2 /*-
3  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #ifdef USB_GLOBAL_INCLUDE_FILE
28 #include USB_GLOBAL_INCLUDE_FILE
29 #else
30 #include <sys/stdint.h>
31 #include <sys/stddef.h>
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
43 #include <sys/sx.h>
44 #include <sys/unistd.h>
45 #include <sys/callout.h>
46 #include <sys/malloc.h>
47 #include <sys/priv.h>
48 #include <sys/proc.h>
49 
50 #include <dev/usb/usb.h>
51 #include <dev/usb/usbdi.h>
52 #include <dev/usb/usbdi_util.h>
53 
54 #define	USB_DEBUG_VAR usb_debug
55 
56 #include <dev/usb/usb_core.h>
57 #include <dev/usb/usb_busdma.h>
58 #include <dev/usb/usb_process.h>
59 #include <dev/usb/usb_transfer.h>
60 #include <dev/usb/usb_device.h>
61 #include <dev/usb/usb_debug.h>
62 #include <dev/usb/usb_util.h>
63 
64 #include <dev/usb/usb_controller.h>
65 #include <dev/usb/usb_bus.h>
66 #include <dev/usb/usb_pf.h>
67 #endif			/* USB_GLOBAL_INCLUDE_FILE */
68 
69 struct usb_std_packet_size {
70 	struct {
71 		uint16_t min;		/* inclusive */
72 		uint16_t max;		/* inclusive */
73 	}	range;
74 
75 	uint16_t fixed[4];
76 };
77 
78 static usb_callback_t usb_request_callback;
79 
80 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
81 
82 	/* This transfer is used for generic control endpoint transfers */
83 
84 	[0] = {
85 		.type = UE_CONTROL,
86 		.endpoint = 0x00,	/* Control endpoint */
87 		.direction = UE_DIR_ANY,
88 		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
89 		.flags = {.proxy_buffer = 1,},
90 		.callback = &usb_request_callback,
91 		.usb_mode = USB_MODE_DUAL,	/* both modes */
92 	},
93 
94 	/* This transfer is used for generic clear stall only */
95 
96 	[1] = {
97 		.type = UE_CONTROL,
98 		.endpoint = 0x00,	/* Control pipe */
99 		.direction = UE_DIR_ANY,
100 		.bufsize = sizeof(struct usb_device_request),
101 		.callback = &usb_do_clear_stall_callback,
102 		.timeout = 1000,	/* 1 second */
103 		.interval = 50,	/* 50ms */
104 		.usb_mode = USB_MODE_HOST,
105 	},
106 };
107 
108 /* function prototypes */
109 
110 static void	usbd_update_max_frame_size(struct usb_xfer *);
111 static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
112 static void	usbd_control_transfer_init(struct usb_xfer *);
113 static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
114 static void	usb_callback_proc(struct usb_proc_msg *);
115 static void	usbd_callback_ss_done_defer(struct usb_xfer *);
116 static void	usbd_callback_wrapper(struct usb_xfer_queue *);
117 static void	usbd_transfer_start_cb(void *);
118 static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
119 static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
120 		    uint8_t type, enum usb_dev_speed speed);
121 
122 /*------------------------------------------------------------------------*
123  *	usb_request_callback
124  *------------------------------------------------------------------------*/
125 static void
126 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
127 {
128 	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
129 		usb_handle_request_callback(xfer, error);
130 	else
131 		usbd_do_request_callback(xfer, error);
132 }
133 
134 /*------------------------------------------------------------------------*
135  *	usbd_update_max_frame_size
136  *
137  * This function updates the maximum frame size, hence high speed USB
138  * can transfer multiple consecutive packets.
139  *------------------------------------------------------------------------*/
140 static void
141 usbd_update_max_frame_size(struct usb_xfer *xfer)
142 {
143 	/* compute maximum frame size */
144 	/* this computation should not overflow 16-bit */
145 	/* max = 15 * 1024 */
146 
147 	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
148 }
149 
150 /*------------------------------------------------------------------------*
151  *	usbd_get_dma_delay
152  *
153  * The following function is called when we need to
154  * synchronize with DMA hardware.
155  *
156  * Returns:
157  *    0: no DMA delay required
158  * Else: milliseconds of DMA delay
159  *------------------------------------------------------------------------*/
160 usb_timeout_t
161 usbd_get_dma_delay(struct usb_device *udev)
162 {
163 	const struct usb_bus_methods *mtod;
164 	uint32_t temp;
165 
166 	mtod = udev->bus->methods;
167 	temp = 0;
168 
169 	if (mtod->get_dma_delay) {
170 		(mtod->get_dma_delay) (udev, &temp);
171 		/*
172 		 * Round up and convert to milliseconds. Note that we use
173 		 * 1024 milliseconds per second. to save a division.
174 		 */
175 		temp += 0x3FF;
176 		temp /= 0x400;
177 	}
178 	return (temp);
179 }
180 
181 /*------------------------------------------------------------------------*
182  *	usbd_transfer_setup_sub_malloc
183  *
184  * This function will allocate one or more DMA'able memory chunks
185  * according to "size", "align" and "count" arguments. "ppc" is
186  * pointed to a linear array of USB page caches afterwards.
187  *
188  * If the "align" argument is equal to "1" a non-contiguous allocation
189  * can happen. Else if the "align" argument is greater than "1", the
190  * allocation will always be contiguous in memory.
191  *
192  * Returns:
193  *    0: Success
194  * Else: Failure
195  *------------------------------------------------------------------------*/
196 #if USB_HAVE_BUSDMA
197 uint8_t
198 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
199     struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
200     usb_size_t count)
201 {
202 	struct usb_page_cache *pc;
203 	struct usb_page *pg;
204 	void *buf;
205 	usb_size_t n_dma_pc;
206 	usb_size_t n_dma_pg;
207 	usb_size_t n_obj;
208 	usb_size_t x;
209 	usb_size_t y;
210 	usb_size_t r;
211 	usb_size_t z;
212 
213 	USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
214 	    align));
215 	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
216 
217 	if (count == 0) {
218 		return (0);		/* nothing to allocate */
219 	}
220 	/*
221 	 * Make sure that the size is aligned properly.
222 	 */
223 	size = -((-size) & (-align));
224 
225 	/*
226 	 * Try multi-allocation chunks to reduce the number of DMA
227 	 * allocations, hence DMA allocations are slow.
228 	 */
229 	if (align == 1) {
230 		/* special case - non-cached multi page DMA memory */
231 		n_dma_pc = count;
232 		n_dma_pg = (2 + (size / USB_PAGE_SIZE));
233 		n_obj = 1;
234 	} else if (size >= USB_PAGE_SIZE) {
235 		n_dma_pc = count;
236 		n_dma_pg = 1;
237 		n_obj = 1;
238 	} else {
239 		/* compute number of objects per page */
240 #ifdef USB_DMA_SINGLE_ALLOC
241 		n_obj = 1;
242 #else
243 		n_obj = (USB_PAGE_SIZE / size);
244 #endif
245 		/*
246 		 * Compute number of DMA chunks, rounded up
247 		 * to nearest one:
248 		 */
249 		n_dma_pc = ((count + n_obj - 1) / n_obj);
250 		n_dma_pg = 1;
251 	}
252 
253 	/*
254 	 * DMA memory is allocated once, but mapped twice. That's why
255 	 * there is one list for auto-free and another list for
256 	 * non-auto-free which only holds the mapping and not the
257 	 * allocation.
258 	 */
259 	if (parm->buf == NULL) {
260 		/* reserve memory (auto-free) */
261 		parm->dma_page_ptr += n_dma_pc * n_dma_pg;
262 		parm->dma_page_cache_ptr += n_dma_pc;
263 
264 		/* reserve memory (no-auto-free) */
265 		parm->dma_page_ptr += count * n_dma_pg;
266 		parm->xfer_page_cache_ptr += count;
267 		return (0);
268 	}
269 	for (x = 0; x != n_dma_pc; x++) {
270 		/* need to initialize the page cache */
271 		parm->dma_page_cache_ptr[x].tag_parent =
272 		    &parm->curr_xfer->xroot->dma_parent_tag;
273 	}
274 	for (x = 0; x != count; x++) {
275 		/* need to initialize the page cache */
276 		parm->xfer_page_cache_ptr[x].tag_parent =
277 		    &parm->curr_xfer->xroot->dma_parent_tag;
278 	}
279 
280 	if (ppc != NULL) {
281 		if (n_obj != 1)
282 			*ppc = parm->xfer_page_cache_ptr;
283 		else
284 			*ppc = parm->dma_page_cache_ptr;
285 	}
286 	r = count;			/* set remainder count */
287 	z = n_obj * size;		/* set allocation size */
288 	pc = parm->xfer_page_cache_ptr;
289 	pg = parm->dma_page_ptr;
290 
291 	if (n_obj == 1) {
292 	    /*
293 	     * Avoid mapping memory twice if only a single object
294 	     * should be allocated per page cache:
295 	     */
296 	    for (x = 0; x != n_dma_pc; x++) {
297 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
298 		    pg, z, align)) {
299 			return (1);	/* failure */
300 		}
301 		/* Make room for one DMA page cache and "n_dma_pg" pages */
302 		parm->dma_page_cache_ptr++;
303 		pg += n_dma_pg;
304 	    }
305 	} else {
306 	    for (x = 0; x != n_dma_pc; x++) {
307 
308 		if (r < n_obj) {
309 			/* compute last remainder */
310 			z = r * size;
311 			n_obj = r;
312 		}
313 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
314 		    pg, z, align)) {
315 			return (1);	/* failure */
316 		}
317 		/* Set beginning of current buffer */
318 		buf = parm->dma_page_cache_ptr->buffer;
319 		/* Make room for one DMA page cache and "n_dma_pg" pages */
320 		parm->dma_page_cache_ptr++;
321 		pg += n_dma_pg;
322 
323 		for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
324 
325 			/* Load sub-chunk into DMA */
326 			if (usb_pc_dmamap_create(pc, size)) {
327 				return (1);	/* failure */
328 			}
329 			pc->buffer = USB_ADD_BYTES(buf, y * size);
330 			pc->page_start = pg;
331 
332 			mtx_lock(pc->tag_parent->mtx);
333 			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
334 				mtx_unlock(pc->tag_parent->mtx);
335 				return (1);	/* failure */
336 			}
337 			mtx_unlock(pc->tag_parent->mtx);
338 		}
339 	    }
340 	}
341 
342 	parm->xfer_page_cache_ptr = pc;
343 	parm->dma_page_ptr = pg;
344 	return (0);
345 }
346 #endif
347 
348 /*------------------------------------------------------------------------*
349  *	usbd_transfer_setup_sub - transfer setup subroutine
350  *
351  * This function must be called from the "xfer_setup" callback of the
352  * USB Host or Device controller driver when setting up an USB
353  * transfer. This function will setup correct packet sizes, buffer
354  * sizes, flags and more, that are stored in the "usb_xfer"
355  * structure.
356  *------------------------------------------------------------------------*/
357 void
358 usbd_transfer_setup_sub(struct usb_setup_params *parm)
359 {
360 	enum {
361 		REQ_SIZE = 8,
362 		MIN_PKT = 8,
363 	};
364 	struct usb_xfer *xfer = parm->curr_xfer;
365 	const struct usb_config *setup = parm->curr_setup;
366 	struct usb_endpoint_ss_comp_descriptor *ecomp;
367 	struct usb_endpoint_descriptor *edesc;
368 	struct usb_std_packet_size std_size;
369 	usb_frcount_t n_frlengths;
370 	usb_frcount_t n_frbuffers;
371 	usb_frcount_t x;
372 	uint16_t maxp_old;
373 	uint8_t type;
374 	uint8_t zmps;
375 
376 	/*
377 	 * Sanity check. The following parameters must be initialized before
378 	 * calling this function.
379 	 */
380 	if ((parm->hc_max_packet_size == 0) ||
381 	    (parm->hc_max_packet_count == 0) ||
382 	    (parm->hc_max_frame_size == 0)) {
383 		parm->err = USB_ERR_INVAL;
384 		goto done;
385 	}
386 	edesc = xfer->endpoint->edesc;
387 	ecomp = xfer->endpoint->ecomp;
388 
389 	type = (edesc->bmAttributes & UE_XFERTYPE);
390 
391 	xfer->flags = setup->flags;
392 	xfer->nframes = setup->frames;
393 	xfer->timeout = setup->timeout;
394 	xfer->callback = setup->callback;
395 	xfer->interval = setup->interval;
396 	xfer->endpointno = edesc->bEndpointAddress;
397 	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
398 	xfer->max_packet_count = 1;
399 	/* make a shadow copy: */
400 	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
401 
402 	parm->bufsize = setup->bufsize;
403 
404 	switch (parm->speed) {
405 	case USB_SPEED_HIGH:
406 		switch (type) {
407 		case UE_ISOCHRONOUS:
408 		case UE_INTERRUPT:
409 			xfer->max_packet_count +=
410 			    (xfer->max_packet_size >> 11) & 3;
411 
412 			/* check for invalid max packet count */
413 			if (xfer->max_packet_count > 3)
414 				xfer->max_packet_count = 3;
415 			break;
416 		default:
417 			break;
418 		}
419 		xfer->max_packet_size &= 0x7FF;
420 		break;
421 	case USB_SPEED_SUPER:
422 		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
423 
424 		if (ecomp != NULL)
425 			xfer->max_packet_count += ecomp->bMaxBurst;
426 
427 		if ((xfer->max_packet_count == 0) ||
428 		    (xfer->max_packet_count > 16))
429 			xfer->max_packet_count = 16;
430 
431 		switch (type) {
432 		case UE_CONTROL:
433 			xfer->max_packet_count = 1;
434 			break;
435 		case UE_ISOCHRONOUS:
436 			if (ecomp != NULL) {
437 				uint8_t mult;
438 
439 				mult = UE_GET_SS_ISO_MULT(
440 				    ecomp->bmAttributes) + 1;
441 				if (mult > 3)
442 					mult = 3;
443 
444 				xfer->max_packet_count *= mult;
445 			}
446 			break;
447 		default:
448 			break;
449 		}
450 		xfer->max_packet_size &= 0x7FF;
451 		break;
452 	default:
453 		break;
454 	}
455 	/* range check "max_packet_count" */
456 
457 	if (xfer->max_packet_count > parm->hc_max_packet_count) {
458 		xfer->max_packet_count = parm->hc_max_packet_count;
459 	}
460 
461 	/* store max packet size value before filtering */
462 
463 	maxp_old = xfer->max_packet_size;
464 
465 	/* filter "wMaxPacketSize" according to HC capabilities */
466 
467 	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
468 	    (xfer->max_packet_size == 0)) {
469 		xfer->max_packet_size = parm->hc_max_packet_size;
470 	}
471 	/* filter "wMaxPacketSize" according to standard sizes */
472 
473 	usbd_get_std_packet_size(&std_size, type, parm->speed);
474 
475 	if (std_size.range.min || std_size.range.max) {
476 
477 		if (xfer->max_packet_size < std_size.range.min) {
478 			xfer->max_packet_size = std_size.range.min;
479 		}
480 		if (xfer->max_packet_size > std_size.range.max) {
481 			xfer->max_packet_size = std_size.range.max;
482 		}
483 	} else {
484 
485 		if (xfer->max_packet_size >= std_size.fixed[3]) {
486 			xfer->max_packet_size = std_size.fixed[3];
487 		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
488 			xfer->max_packet_size = std_size.fixed[2];
489 		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
490 			xfer->max_packet_size = std_size.fixed[1];
491 		} else {
492 			/* only one possibility left */
493 			xfer->max_packet_size = std_size.fixed[0];
494 		}
495 	}
496 
497 	/*
498 	 * Check if the max packet size was outside its allowed range
499 	 * and clamped to a valid value:
500 	 */
501 	if (maxp_old != xfer->max_packet_size)
502 		xfer->flags_int.maxp_was_clamped = 1;
503 
504 	/* compute "max_frame_size" */
505 
506 	usbd_update_max_frame_size(xfer);
507 
508 	/* check interrupt interval and transfer pre-delay */
509 
510 	if (type == UE_ISOCHRONOUS) {
511 
512 		uint16_t frame_limit;
513 
514 		xfer->interval = 0;	/* not used, must be zero */
515 		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
516 
517 		if (xfer->timeout == 0) {
518 			/*
519 			 * set a default timeout in
520 			 * case something goes wrong!
521 			 */
522 			xfer->timeout = 1000 / 4;
523 		}
524 		switch (parm->speed) {
525 		case USB_SPEED_LOW:
526 		case USB_SPEED_FULL:
527 			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
528 			xfer->fps_shift = 0;
529 			break;
530 		default:
531 			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
532 			xfer->fps_shift = edesc->bInterval;
533 			if (xfer->fps_shift > 0)
534 				xfer->fps_shift--;
535 			if (xfer->fps_shift > 3)
536 				xfer->fps_shift = 3;
537 			if (xfer->flags.pre_scale_frames != 0)
538 				xfer->nframes <<= (3 - xfer->fps_shift);
539 			break;
540 		}
541 
542 		if (xfer->nframes > frame_limit) {
543 			/*
544 			 * this is not going to work
545 			 * cross hardware
546 			 */
547 			parm->err = USB_ERR_INVAL;
548 			goto done;
549 		}
550 		if (xfer->nframes == 0) {
551 			/*
552 			 * this is not a valid value
553 			 */
554 			parm->err = USB_ERR_ZERO_NFRAMES;
555 			goto done;
556 		}
557 	} else {
558 
559 		/*
560 		 * If a value is specified use that else check the
561 		 * endpoint descriptor!
562 		 */
563 		if (type == UE_INTERRUPT) {
564 
565 			uint32_t temp;
566 
567 			if (xfer->interval == 0) {
568 
569 				xfer->interval = edesc->bInterval;
570 
571 				switch (parm->speed) {
572 				case USB_SPEED_LOW:
573 				case USB_SPEED_FULL:
574 					break;
575 				default:
576 					/* 125us -> 1ms */
577 					if (xfer->interval < 4)
578 						xfer->interval = 1;
579 					else if (xfer->interval > 16)
580 						xfer->interval = (1 << (16 - 4));
581 					else
582 						xfer->interval =
583 						    (1 << (xfer->interval - 4));
584 					break;
585 				}
586 			}
587 
588 			if (xfer->interval == 0) {
589 				/*
590 				 * One millisecond is the smallest
591 				 * interval we support:
592 				 */
593 				xfer->interval = 1;
594 			}
595 
596 			xfer->fps_shift = 0;
597 			temp = 1;
598 
599 			while ((temp != 0) && (temp < xfer->interval)) {
600 				xfer->fps_shift++;
601 				temp *= 2;
602 			}
603 
604 			switch (parm->speed) {
605 			case USB_SPEED_LOW:
606 			case USB_SPEED_FULL:
607 				break;
608 			default:
609 				xfer->fps_shift += 3;
610 				break;
611 			}
612 		}
613 	}
614 
615 	/*
616 	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
617 	 * to be equal to zero when setting up USB transfers, hence
618 	 * this leads to alot of extra code in the USB kernel.
619 	 */
620 
621 	if ((xfer->max_frame_size == 0) ||
622 	    (xfer->max_packet_size == 0)) {
623 
624 		zmps = 1;
625 
626 		if ((parm->bufsize <= MIN_PKT) &&
627 		    (type != UE_CONTROL) &&
628 		    (type != UE_BULK)) {
629 
630 			/* workaround */
631 			xfer->max_packet_size = MIN_PKT;
632 			xfer->max_packet_count = 1;
633 			parm->bufsize = 0;	/* automatic setup length */
634 			usbd_update_max_frame_size(xfer);
635 
636 		} else {
637 			parm->err = USB_ERR_ZERO_MAXP;
638 			goto done;
639 		}
640 
641 	} else {
642 		zmps = 0;
643 	}
644 
645 	/*
646 	 * check if we should setup a default
647 	 * length:
648 	 */
649 
650 	if (parm->bufsize == 0) {
651 
652 		parm->bufsize = xfer->max_frame_size;
653 
654 		if (type == UE_ISOCHRONOUS) {
655 			parm->bufsize *= xfer->nframes;
656 		}
657 	}
658 	/*
659 	 * check if we are about to setup a proxy
660 	 * type of buffer:
661 	 */
662 
663 	if (xfer->flags.proxy_buffer) {
664 
665 		/* round bufsize up */
666 
667 		parm->bufsize += (xfer->max_frame_size - 1);
668 
669 		if (parm->bufsize < xfer->max_frame_size) {
670 			/* length wrapped around */
671 			parm->err = USB_ERR_INVAL;
672 			goto done;
673 		}
674 		/* subtract remainder */
675 
676 		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
677 
678 		/* add length of USB device request structure, if any */
679 
680 		if (type == UE_CONTROL) {
681 			parm->bufsize += REQ_SIZE;	/* SETUP message */
682 		}
683 	}
684 	xfer->max_data_length = parm->bufsize;
685 
686 	/* Setup "n_frlengths" and "n_frbuffers" */
687 
688 	if (type == UE_ISOCHRONOUS) {
689 		n_frlengths = xfer->nframes;
690 		n_frbuffers = 1;
691 	} else {
692 
693 		if (type == UE_CONTROL) {
694 			xfer->flags_int.control_xfr = 1;
695 			if (xfer->nframes == 0) {
696 				if (parm->bufsize <= REQ_SIZE) {
697 					/*
698 					 * there will never be any data
699 					 * stage
700 					 */
701 					xfer->nframes = 1;
702 				} else {
703 					xfer->nframes = 2;
704 				}
705 			}
706 		} else {
707 			if (xfer->nframes == 0) {
708 				xfer->nframes = 1;
709 			}
710 		}
711 
712 		n_frlengths = xfer->nframes;
713 		n_frbuffers = xfer->nframes;
714 	}
715 
716 	/*
717 	 * check if we have room for the
718 	 * USB device request structure:
719 	 */
720 
721 	if (type == UE_CONTROL) {
722 
723 		if (xfer->max_data_length < REQ_SIZE) {
724 			/* length wrapped around or too small bufsize */
725 			parm->err = USB_ERR_INVAL;
726 			goto done;
727 		}
728 		xfer->max_data_length -= REQ_SIZE;
729 	}
730 	/*
731 	 * Setup "frlengths" and shadow "frlengths" for keeping the
732 	 * initial frame lengths when a USB transfer is complete. This
733 	 * information is useful when computing isochronous offsets.
734 	 */
735 	xfer->frlengths = parm->xfer_length_ptr;
736 	parm->xfer_length_ptr += 2 * n_frlengths;
737 
738 	/* setup "frbuffers" */
739 	xfer->frbuffers = parm->xfer_page_cache_ptr;
740 	parm->xfer_page_cache_ptr += n_frbuffers;
741 
742 	/* initialize max frame count */
743 	xfer->max_frame_count = xfer->nframes;
744 
745 	/*
746 	 * check if we need to setup
747 	 * a local buffer:
748 	 */
749 
750 	if (!xfer->flags.ext_buffer) {
751 #if USB_HAVE_BUSDMA
752 		struct usb_page_search page_info;
753 		struct usb_page_cache *pc;
754 
755 		if (usbd_transfer_setup_sub_malloc(parm,
756 		    &pc, parm->bufsize, 1, 1)) {
757 			parm->err = USB_ERR_NOMEM;
758 		} else if (parm->buf != NULL) {
759 
760 			usbd_get_page(pc, 0, &page_info);
761 
762 			xfer->local_buffer = page_info.buffer;
763 
764 			usbd_xfer_set_frame_offset(xfer, 0, 0);
765 
766 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
767 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
768 			}
769 		}
770 #else
771 		/* align data */
772 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
773 
774 		if (parm->buf != NULL) {
775 			xfer->local_buffer =
776 			    USB_ADD_BYTES(parm->buf, parm->size[0]);
777 
778 			usbd_xfer_set_frame_offset(xfer, 0, 0);
779 
780 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
781 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
782 			}
783 		}
784 		parm->size[0] += parm->bufsize;
785 
786 		/* align data again */
787 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
788 #endif
789 	}
790 	/*
791 	 * Compute maximum buffer size
792 	 */
793 
794 	if (parm->bufsize_max < parm->bufsize) {
795 		parm->bufsize_max = parm->bufsize;
796 	}
797 #if USB_HAVE_BUSDMA
798 	if (xfer->flags_int.bdma_enable) {
799 		/*
800 		 * Setup "dma_page_ptr".
801 		 *
802 		 * Proof for formula below:
803 		 *
804 		 * Assume there are three USB frames having length "a", "b" and
805 		 * "c". These USB frames will at maximum need "z"
806 		 * "usb_page" structures. "z" is given by:
807 		 *
808 		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
809 		 * ((c / USB_PAGE_SIZE) + 2);
810 		 *
811 		 * Constraining "a", "b" and "c" like this:
812 		 *
813 		 * (a + b + c) <= parm->bufsize
814 		 *
815 		 * We know that:
816 		 *
817 		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
818 		 *
819 		 * Here is the general formula:
820 		 */
821 		xfer->dma_page_ptr = parm->dma_page_ptr;
822 		parm->dma_page_ptr += (2 * n_frbuffers);
823 		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
824 	}
825 #endif
826 	if (zmps) {
827 		/* correct maximum data length */
828 		xfer->max_data_length = 0;
829 	}
830 	/* subtract USB frame remainder from "hc_max_frame_size" */
831 
832 	xfer->max_hc_frame_size =
833 	    (parm->hc_max_frame_size -
834 	    (parm->hc_max_frame_size % xfer->max_frame_size));
835 
836 	if (xfer->max_hc_frame_size == 0) {
837 		parm->err = USB_ERR_INVAL;
838 		goto done;
839 	}
840 
841 	/* initialize frame buffers */
842 
843 	if (parm->buf) {
844 		for (x = 0; x != n_frbuffers; x++) {
845 			xfer->frbuffers[x].tag_parent =
846 			    &xfer->xroot->dma_parent_tag;
847 #if USB_HAVE_BUSDMA
848 			if (xfer->flags_int.bdma_enable &&
849 			    (parm->bufsize_max > 0)) {
850 
851 				if (usb_pc_dmamap_create(
852 				    xfer->frbuffers + x,
853 				    parm->bufsize_max)) {
854 					parm->err = USB_ERR_NOMEM;
855 					goto done;
856 				}
857 			}
858 #endif
859 		}
860 	}
861 done:
862 	if (parm->err) {
863 		/*
864 		 * Set some dummy values so that we avoid division by zero:
865 		 */
866 		xfer->max_hc_frame_size = 1;
867 		xfer->max_frame_size = 1;
868 		xfer->max_packet_size = 1;
869 		xfer->max_data_length = 0;
870 		xfer->nframes = 0;
871 		xfer->max_frame_count = 0;
872 	}
873 }
874 
875 static uint8_t
876 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
877     uint16_t n_setup)
878 {
879 	while (n_setup--) {
880 		uint8_t type = setup_start[n_setup].type;
881 		if (type == UE_BULK || type == UE_BULK_INTR ||
882 		    type == UE_TYPE_ANY)
883 			return (1);
884 	}
885 	return (0);
886 }
887 
888 /*------------------------------------------------------------------------*
889  *	usbd_transfer_setup - setup an array of USB transfers
890  *
891  * NOTE: You must always call "usbd_transfer_unsetup" after calling
892  * "usbd_transfer_setup" if success was returned.
893  *
894  * The idea is that the USB device driver should pre-allocate all its
895  * transfers by one call to this function.
896  *
897  * Return values:
898  *    0: Success
899  * Else: Failure
900  *------------------------------------------------------------------------*/
901 usb_error_t
902 usbd_transfer_setup(struct usb_device *udev,
903     const uint8_t *ifaces, struct usb_xfer **ppxfer,
904     const struct usb_config *setup_start, uint16_t n_setup,
905     void *priv_sc, struct mtx *xfer_mtx)
906 {
907 	const struct usb_config *setup_end = setup_start + n_setup;
908 	const struct usb_config *setup;
909 	struct usb_setup_params *parm;
910 	struct usb_endpoint *ep;
911 	struct usb_xfer_root *info;
912 	struct usb_xfer *xfer;
913 	void *buf = NULL;
914 	usb_error_t error = 0;
915 	uint16_t n;
916 	uint16_t refcount;
917 	uint8_t do_unlock;
918 
919 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
920 	    "usbd_transfer_setup can sleep!");
921 
922 	/* do some checking first */
923 
924 	if (n_setup == 0) {
925 		DPRINTFN(6, "setup array has zero length!\n");
926 		return (USB_ERR_INVAL);
927 	}
928 	if (ifaces == 0) {
929 		DPRINTFN(6, "ifaces array is NULL!\n");
930 		return (USB_ERR_INVAL);
931 	}
932 	if (xfer_mtx == NULL) {
933 		DPRINTFN(6, "using global lock\n");
934 		xfer_mtx = &Giant;
935 	}
936 
937 	/* more sanity checks */
938 
939 	for (setup = setup_start, n = 0;
940 	    setup != setup_end; setup++, n++) {
941 		if (setup->bufsize == (usb_frlength_t)-1) {
942 			error = USB_ERR_BAD_BUFSIZE;
943 			DPRINTF("invalid bufsize\n");
944 		}
945 		if (setup->callback == NULL) {
946 			error = USB_ERR_NO_CALLBACK;
947 			DPRINTF("no callback\n");
948 		}
949 		ppxfer[n] = NULL;
950 	}
951 
952 	if (error)
953 		return (error);
954 
955 	/* Protect scratch area */
956 	do_unlock = usbd_enum_lock(udev);
957 
958 	refcount = 0;
959 	info = NULL;
960 
961 	parm = &udev->scratch.xfer_setup[0].parm;
962 	memset(parm, 0, sizeof(*parm));
963 
964 	parm->udev = udev;
965 	parm->speed = usbd_get_speed(udev);
966 	parm->hc_max_packet_count = 1;
967 
968 	if (parm->speed >= USB_SPEED_MAX) {
969 		parm->err = USB_ERR_INVAL;
970 		goto done;
971 	}
972 	/* setup all transfers */
973 
974 	while (1) {
975 
976 		if (buf) {
977 			/*
978 			 * Initialize the "usb_xfer_root" structure,
979 			 * which is common for all our USB transfers.
980 			 */
981 			info = USB_ADD_BYTES(buf, 0);
982 
983 			info->memory_base = buf;
984 			info->memory_size = parm->size[0];
985 
986 #if USB_HAVE_BUSDMA
987 			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
988 			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
989 #endif
990 			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
991 			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
992 
993 			cv_init(&info->cv_drain, "WDRAIN");
994 
995 			info->xfer_mtx = xfer_mtx;
996 #if USB_HAVE_BUSDMA
997 			usb_dma_tag_setup(&info->dma_parent_tag,
998 			    parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
999 			    xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
1000 			    parm->dma_tag_max);
1001 #endif
1002 
1003 			info->bus = udev->bus;
1004 			info->udev = udev;
1005 
1006 			TAILQ_INIT(&info->done_q.head);
1007 			info->done_q.command = &usbd_callback_wrapper;
1008 #if USB_HAVE_BUSDMA
1009 			TAILQ_INIT(&info->dma_q.head);
1010 			info->dma_q.command = &usb_bdma_work_loop;
1011 #endif
1012 			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1013 			info->done_m[0].xroot = info;
1014 			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1015 			info->done_m[1].xroot = info;
1016 
1017 			/*
1018 			 * In device side mode control endpoint
1019 			 * requests need to run from a separate
1020 			 * context, else there is a chance of
1021 			 * deadlock!
1022 			 */
1023 			if (setup_start == usb_control_ep_cfg)
1024 				info->done_p =
1025 				    USB_BUS_CONTROL_XFER_PROC(udev->bus);
1026 			else if (xfer_mtx == &Giant)
1027 				info->done_p =
1028 				    USB_BUS_GIANT_PROC(udev->bus);
1029 			else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1030 				info->done_p =
1031 				    USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1032 			else
1033 				info->done_p =
1034 				    USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1035 		}
1036 		/* reset sizes */
1037 
1038 		parm->size[0] = 0;
1039 		parm->buf = buf;
1040 		parm->size[0] += sizeof(info[0]);
1041 
1042 		for (setup = setup_start, n = 0;
1043 		    setup != setup_end; setup++, n++) {
1044 
1045 			/* skip USB transfers without callbacks: */
1046 			if (setup->callback == NULL) {
1047 				continue;
1048 			}
1049 			/* see if there is a matching endpoint */
1050 			ep = usbd_get_endpoint(udev,
1051 			    ifaces[setup->if_index], setup);
1052 
1053 			/*
1054 			 * Check that the USB PIPE is valid and that
1055 			 * the endpoint mode is proper.
1056 			 *
1057 			 * Make sure we don't allocate a streams
1058 			 * transfer when such a combination is not
1059 			 * valid.
1060 			 */
1061 			if ((ep == NULL) || (ep->methods == NULL) ||
1062 			    ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1063 			    (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1064 			    (setup->stream_id != 0 &&
1065 			    (setup->stream_id >= USB_MAX_EP_STREAMS ||
1066 			    (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1067 				if (setup->flags.no_pipe_ok)
1068 					continue;
1069 				if ((setup->usb_mode != USB_MODE_DUAL) &&
1070 				    (setup->usb_mode != udev->flags.usb_mode))
1071 					continue;
1072 				parm->err = USB_ERR_NO_PIPE;
1073 				goto done;
1074 			}
1075 
1076 			/* align data properly */
1077 			parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1078 
1079 			/* store current setup pointer */
1080 			parm->curr_setup = setup;
1081 
1082 			if (buf) {
1083 				/*
1084 				 * Common initialization of the
1085 				 * "usb_xfer" structure.
1086 				 */
1087 				xfer = USB_ADD_BYTES(buf, parm->size[0]);
1088 				xfer->address = udev->address;
1089 				xfer->priv_sc = priv_sc;
1090 				xfer->xroot = info;
1091 
1092 				usb_callout_init_mtx(&xfer->timeout_handle,
1093 				    &udev->bus->bus_mtx, 0);
1094 			} else {
1095 				/*
1096 				 * Setup a dummy xfer, hence we are
1097 				 * writing to the "usb_xfer"
1098 				 * structure pointed to by "xfer"
1099 				 * before we have allocated any
1100 				 * memory:
1101 				 */
1102 				xfer = &udev->scratch.xfer_setup[0].dummy;
1103 				memset(xfer, 0, sizeof(*xfer));
1104 				refcount++;
1105 			}
1106 
1107 			/* set transfer endpoint pointer */
1108 			xfer->endpoint = ep;
1109 
1110 			/* set transfer stream ID */
1111 			xfer->stream_id = setup->stream_id;
1112 
1113 			parm->size[0] += sizeof(xfer[0]);
1114 			parm->methods = xfer->endpoint->methods;
1115 			parm->curr_xfer = xfer;
1116 
1117 			/*
1118 			 * Call the Host or Device controller transfer
1119 			 * setup routine:
1120 			 */
1121 			(udev->bus->methods->xfer_setup) (parm);
1122 
1123 			/* check for error */
1124 			if (parm->err)
1125 				goto done;
1126 
1127 			if (buf) {
1128 				/*
1129 				 * Increment the endpoint refcount. This
1130 				 * basically prevents setting a new
1131 				 * configuration and alternate setting
1132 				 * when USB transfers are in use on
1133 				 * the given interface. Search the USB
1134 				 * code for "endpoint->refcount_alloc" if you
1135 				 * want more information.
1136 				 */
1137 				USB_BUS_LOCK(info->bus);
1138 				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1139 					parm->err = USB_ERR_INVAL;
1140 
1141 				xfer->endpoint->refcount_alloc++;
1142 
1143 				if (xfer->endpoint->refcount_alloc == 0)
1144 					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1145 				USB_BUS_UNLOCK(info->bus);
1146 
1147 				/*
1148 				 * Whenever we set ppxfer[] then we
1149 				 * also need to increment the
1150 				 * "setup_refcount":
1151 				 */
1152 				info->setup_refcount++;
1153 
1154 				/*
1155 				 * Transfer is successfully setup and
1156 				 * can be used:
1157 				 */
1158 				ppxfer[n] = xfer;
1159 			}
1160 
1161 			/* check for error */
1162 			if (parm->err)
1163 				goto done;
1164 		}
1165 
1166 		if (buf != NULL || parm->err != 0)
1167 			goto done;
1168 
1169 		/* if no transfers, nothing to do */
1170 		if (refcount == 0)
1171 			goto done;
1172 
1173 		/* align data properly */
1174 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1175 
1176 		/* store offset temporarily */
1177 		parm->size[1] = parm->size[0];
1178 
1179 		/*
1180 		 * The number of DMA tags required depends on
1181 		 * the number of endpoints. The current estimate
1182 		 * for maximum number of DMA tags per endpoint
1183 		 * is three:
1184 		 * 1) for loading memory
1185 		 * 2) for allocating memory
1186 		 * 3) for fixing memory [UHCI]
1187 		 */
1188 		parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1189 
1190 		/*
1191 		 * DMA tags for QH, TD, Data and more.
1192 		 */
1193 		parm->dma_tag_max += 8;
1194 
1195 		parm->dma_tag_p += parm->dma_tag_max;
1196 
1197 		parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1198 		    ((uint8_t *)0);
1199 
1200 		/* align data properly */
1201 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1202 
1203 		/* store offset temporarily */
1204 		parm->size[3] = parm->size[0];
1205 
1206 		parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1207 		    ((uint8_t *)0);
1208 
1209 		/* align data properly */
1210 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1211 
1212 		/* store offset temporarily */
1213 		parm->size[4] = parm->size[0];
1214 
1215 		parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1216 		    ((uint8_t *)0);
1217 
1218 		/* store end offset temporarily */
1219 		parm->size[5] = parm->size[0];
1220 
1221 		parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1222 		    ((uint8_t *)0);
1223 
1224 		/* store end offset temporarily */
1225 
1226 		parm->size[2] = parm->size[0];
1227 
1228 		/* align data properly */
1229 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1230 
1231 		parm->size[6] = parm->size[0];
1232 
1233 		parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1234 		    ((uint8_t *)0);
1235 
1236 		/* align data properly */
1237 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1238 
1239 		/* allocate zeroed memory */
1240 		buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1241 
1242 		if (buf == NULL) {
1243 			parm->err = USB_ERR_NOMEM;
1244 			DPRINTFN(0, "cannot allocate memory block for "
1245 			    "configuration (%d bytes)\n",
1246 			    parm->size[0]);
1247 			goto done;
1248 		}
1249 		parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1250 		parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1251 		parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1252 		parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1253 		parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1254 	}
1255 
1256 done:
1257 	if (buf) {
1258 		if (info->setup_refcount == 0) {
1259 			/*
1260 			 * "usbd_transfer_unsetup_sub" will unlock
1261 			 * the bus mutex before returning !
1262 			 */
1263 			USB_BUS_LOCK(info->bus);
1264 
1265 			/* something went wrong */
1266 			usbd_transfer_unsetup_sub(info, 0);
1267 		}
1268 	}
1269 
1270 	/* check if any errors happened */
1271 	if (parm->err)
1272 		usbd_transfer_unsetup(ppxfer, n_setup);
1273 
1274 	error = parm->err;
1275 
1276 	if (do_unlock)
1277 		usbd_enum_unlock(udev);
1278 
1279 	return (error);
1280 }
1281 
1282 /*------------------------------------------------------------------------*
1283  *	usbd_transfer_unsetup_sub - factored out code
1284  *------------------------------------------------------------------------*/
1285 static void
1286 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1287 {
1288 #if USB_HAVE_BUSDMA
1289 	struct usb_page_cache *pc;
1290 #endif
1291 
1292 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1293 
1294 	/* wait for any outstanding DMA operations */
1295 
1296 	if (needs_delay) {
1297 		usb_timeout_t temp;
1298 		temp = usbd_get_dma_delay(info->udev);
1299 		if (temp != 0) {
1300 			usb_pause_mtx(&info->bus->bus_mtx,
1301 			    USB_MS_TO_TICKS(temp));
1302 		}
1303 	}
1304 
1305 	/* make sure that our done messages are not queued anywhere */
1306 	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1307 
1308 	USB_BUS_UNLOCK(info->bus);
1309 
1310 #if USB_HAVE_BUSDMA
1311 	/* free DMA'able memory, if any */
1312 	pc = info->dma_page_cache_start;
1313 	while (pc != info->dma_page_cache_end) {
1314 		usb_pc_free_mem(pc);
1315 		pc++;
1316 	}
1317 
1318 	/* free DMA maps in all "xfer->frbuffers" */
1319 	pc = info->xfer_page_cache_start;
1320 	while (pc != info->xfer_page_cache_end) {
1321 		usb_pc_dmamap_destroy(pc);
1322 		pc++;
1323 	}
1324 
1325 	/* free all DMA tags */
1326 	usb_dma_tag_unsetup(&info->dma_parent_tag);
1327 #endif
1328 
1329 	cv_destroy(&info->cv_drain);
1330 
1331 	/*
1332 	 * free the "memory_base" last, hence the "info" structure is
1333 	 * contained within the "memory_base"!
1334 	 */
1335 	free(info->memory_base, M_USB);
1336 }
1337 
1338 /*------------------------------------------------------------------------*
1339  *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1340  *
1341  * NOTE: All USB transfers in progress will get called back passing
1342  * the error code "USB_ERR_CANCELLED" before this function
1343  * returns.
1344  *------------------------------------------------------------------------*/
1345 void
1346 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1347 {
1348 	struct usb_xfer *xfer;
1349 	struct usb_xfer_root *info;
1350 	uint8_t needs_delay = 0;
1351 
1352 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1353 	    "usbd_transfer_unsetup can sleep!");
1354 
1355 	while (n_setup--) {
1356 		xfer = pxfer[n_setup];
1357 
1358 		if (xfer == NULL)
1359 			continue;
1360 
1361 		info = xfer->xroot;
1362 
1363 		USB_XFER_LOCK(xfer);
1364 		USB_BUS_LOCK(info->bus);
1365 
1366 		/*
1367 		 * HINT: when you start/stop a transfer, it might be a
1368 		 * good idea to directly use the "pxfer[]" structure:
1369 		 *
1370 		 * usbd_transfer_start(sc->pxfer[0]);
1371 		 * usbd_transfer_stop(sc->pxfer[0]);
1372 		 *
1373 		 * That way, if your code has many parts that will not
1374 		 * stop running under the same lock, in other words
1375 		 * "xfer_mtx", the usbd_transfer_start and
1376 		 * usbd_transfer_stop functions will simply return
1377 		 * when they detect a NULL pointer argument.
1378 		 *
1379 		 * To avoid any races we clear the "pxfer[]" pointer
1380 		 * while holding the private mutex of the driver:
1381 		 */
1382 		pxfer[n_setup] = NULL;
1383 
1384 		USB_BUS_UNLOCK(info->bus);
1385 		USB_XFER_UNLOCK(xfer);
1386 
1387 		usbd_transfer_drain(xfer);
1388 
1389 #if USB_HAVE_BUSDMA
1390 		if (xfer->flags_int.bdma_enable)
1391 			needs_delay = 1;
1392 #endif
1393 		/*
1394 		 * NOTE: default endpoint does not have an
1395 		 * interface, even if endpoint->iface_index == 0
1396 		 */
1397 		USB_BUS_LOCK(info->bus);
1398 		xfer->endpoint->refcount_alloc--;
1399 		USB_BUS_UNLOCK(info->bus);
1400 
1401 		usb_callout_drain(&xfer->timeout_handle);
1402 
1403 		USB_BUS_LOCK(info->bus);
1404 
1405 		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1406 		    "reference count\n"));
1407 
1408 		info->setup_refcount--;
1409 
1410 		if (info->setup_refcount == 0) {
1411 			usbd_transfer_unsetup_sub(info,
1412 			    needs_delay);
1413 		} else {
1414 			USB_BUS_UNLOCK(info->bus);
1415 		}
1416 	}
1417 }
1418 
1419 /*------------------------------------------------------------------------*
1420  *	usbd_control_transfer_init - factored out code
1421  *
1422  * In USB Device Mode we have to wait for the SETUP packet which
1423  * containst the "struct usb_device_request" structure, before we can
1424  * transfer any data. In USB Host Mode we already have the SETUP
1425  * packet at the moment the USB transfer is started. This leads us to
1426  * having to setup the USB transfer at two different places in
1427  * time. This function just contains factored out control transfer
1428  * initialisation code, so that we don't duplicate the code.
1429  *------------------------------------------------------------------------*/
1430 static void
1431 usbd_control_transfer_init(struct usb_xfer *xfer)
1432 {
1433 	struct usb_device_request req;
1434 
1435 	/* copy out the USB request header */
1436 
1437 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1438 
1439 	/* setup remainder */
1440 
1441 	xfer->flags_int.control_rem = UGETW(req.wLength);
1442 
1443 	/* copy direction to endpoint variable */
1444 
1445 	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1446 	xfer->endpointno |=
1447 	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1448 }
1449 
1450 /*------------------------------------------------------------------------*
1451  *	usbd_control_transfer_did_data
1452  *
1453  * This function returns non-zero if a control endpoint has
1454  * transferred the first DATA packet after the SETUP packet.
1455  * Else it returns zero.
1456  *------------------------------------------------------------------------*/
1457 static uint8_t
1458 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1459 {
1460 	struct usb_device_request req;
1461 
1462 	/* SETUP packet is not yet sent */
1463 	if (xfer->flags_int.control_hdr != 0)
1464 		return (0);
1465 
1466 	/* copy out the USB request header */
1467 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1468 
1469 	/* compare remainder to the initial value */
1470 	return (xfer->flags_int.control_rem != UGETW(req.wLength));
1471 }
1472 
1473 /*------------------------------------------------------------------------*
1474  *	usbd_setup_ctrl_transfer
1475  *
1476  * This function handles initialisation of control transfers. Control
1477  * transfers are special in that regard that they can both transmit
1478  * and receive data.
1479  *
1480  * Return values:
1481  *    0: Success
1482  * Else: Failure
1483  *------------------------------------------------------------------------*/
1484 static int
1485 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1486 {
1487 	usb_frlength_t len;
1488 
1489 	/* Check for control endpoint stall */
1490 	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1491 		/* the control transfer is no longer active */
1492 		xfer->flags_int.control_stall = 1;
1493 		xfer->flags_int.control_act = 0;
1494 	} else {
1495 		/* don't stall control transfer by default */
1496 		xfer->flags_int.control_stall = 0;
1497 	}
1498 
1499 	/* Check for invalid number of frames */
1500 	if (xfer->nframes > 2) {
1501 		/*
1502 		 * If you need to split a control transfer, you
1503 		 * have to do one part at a time. Only with
1504 		 * non-control transfers you can do multiple
1505 		 * parts a time.
1506 		 */
1507 		DPRINTFN(0, "Too many frames: %u\n",
1508 		    (unsigned int)xfer->nframes);
1509 		goto error;
1510 	}
1511 
1512 	/*
1513          * Check if there is a control
1514          * transfer in progress:
1515          */
1516 	if (xfer->flags_int.control_act) {
1517 
1518 		if (xfer->flags_int.control_hdr) {
1519 
1520 			/* clear send header flag */
1521 
1522 			xfer->flags_int.control_hdr = 0;
1523 
1524 			/* setup control transfer */
1525 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1526 				usbd_control_transfer_init(xfer);
1527 			}
1528 		}
1529 		/* get data length */
1530 
1531 		len = xfer->sumlen;
1532 
1533 	} else {
1534 
1535 		/* the size of the SETUP structure is hardcoded ! */
1536 
1537 		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1538 			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1539 			    xfer->frlengths[0], sizeof(struct
1540 			    usb_device_request));
1541 			goto error;
1542 		}
1543 		/* check USB mode */
1544 		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1545 
1546 			/* check number of frames */
1547 			if (xfer->nframes != 1) {
1548 				/*
1549 			         * We need to receive the setup
1550 			         * message first so that we know the
1551 			         * data direction!
1552 			         */
1553 				DPRINTF("Misconfigured transfer\n");
1554 				goto error;
1555 			}
1556 			/*
1557 			 * Set a dummy "control_rem" value.  This
1558 			 * variable will be overwritten later by a
1559 			 * call to "usbd_control_transfer_init()" !
1560 			 */
1561 			xfer->flags_int.control_rem = 0xFFFF;
1562 		} else {
1563 
1564 			/* setup "endpoint" and "control_rem" */
1565 
1566 			usbd_control_transfer_init(xfer);
1567 		}
1568 
1569 		/* set transfer-header flag */
1570 
1571 		xfer->flags_int.control_hdr = 1;
1572 
1573 		/* get data length */
1574 
1575 		len = (xfer->sumlen - sizeof(struct usb_device_request));
1576 	}
1577 
1578 	/* update did data flag */
1579 
1580 	xfer->flags_int.control_did_data =
1581 	    usbd_control_transfer_did_data(xfer);
1582 
1583 	/* check if there is a length mismatch */
1584 
1585 	if (len > xfer->flags_int.control_rem) {
1586 		DPRINTFN(0, "Length (%d) greater than "
1587 		    "remaining length (%d)\n", len,
1588 		    xfer->flags_int.control_rem);
1589 		goto error;
1590 	}
1591 	/* check if we are doing a short transfer */
1592 
1593 	if (xfer->flags.force_short_xfer) {
1594 		xfer->flags_int.control_rem = 0;
1595 	} else {
1596 		if ((len != xfer->max_data_length) &&
1597 		    (len != xfer->flags_int.control_rem) &&
1598 		    (xfer->nframes != 1)) {
1599 			DPRINTFN(0, "Short control transfer without "
1600 			    "force_short_xfer set\n");
1601 			goto error;
1602 		}
1603 		xfer->flags_int.control_rem -= len;
1604 	}
1605 
1606 	/* the status part is executed when "control_act" is 0 */
1607 
1608 	if ((xfer->flags_int.control_rem > 0) ||
1609 	    (xfer->flags.manual_status)) {
1610 		/* don't execute the STATUS stage yet */
1611 		xfer->flags_int.control_act = 1;
1612 
1613 		/* sanity check */
1614 		if ((!xfer->flags_int.control_hdr) &&
1615 		    (xfer->nframes == 1)) {
1616 			/*
1617 		         * This is not a valid operation!
1618 		         */
1619 			DPRINTFN(0, "Invalid parameter "
1620 			    "combination\n");
1621 			goto error;
1622 		}
1623 	} else {
1624 		/* time to execute the STATUS stage */
1625 		xfer->flags_int.control_act = 0;
1626 	}
1627 	return (0);			/* success */
1628 
1629 error:
1630 	return (1);			/* failure */
1631 }
1632 
1633 /*------------------------------------------------------------------------*
1634  *	usbd_transfer_submit - start USB hardware for the given transfer
1635  *
1636  * This function should only be called from the USB callback.
1637  *------------------------------------------------------------------------*/
1638 void
1639 usbd_transfer_submit(struct usb_xfer *xfer)
1640 {
1641 	struct usb_xfer_root *info;
1642 	struct usb_bus *bus;
1643 	usb_frcount_t x;
1644 
1645 	info = xfer->xroot;
1646 	bus = info->bus;
1647 
1648 	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1649 	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1650 	    "read" : "write");
1651 
1652 #ifdef USB_DEBUG
1653 	if (USB_DEBUG_VAR > 0) {
1654 		USB_BUS_LOCK(bus);
1655 
1656 		usb_dump_endpoint(xfer->endpoint);
1657 
1658 		USB_BUS_UNLOCK(bus);
1659 	}
1660 #endif
1661 
1662 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1663 	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1664 
1665 	/* Only open the USB transfer once! */
1666 	if (!xfer->flags_int.open) {
1667 		xfer->flags_int.open = 1;
1668 
1669 		DPRINTF("open\n");
1670 
1671 		USB_BUS_LOCK(bus);
1672 		(xfer->endpoint->methods->open) (xfer);
1673 		USB_BUS_UNLOCK(bus);
1674 	}
1675 	/* set "transferring" flag */
1676 	xfer->flags_int.transferring = 1;
1677 
1678 #if USB_HAVE_POWERD
1679 	/* increment power reference */
1680 	usbd_transfer_power_ref(xfer, 1);
1681 #endif
1682 	/*
1683 	 * Check if the transfer is waiting on a queue, most
1684 	 * frequently the "done_q":
1685 	 */
1686 	if (xfer->wait_queue) {
1687 		USB_BUS_LOCK(bus);
1688 		usbd_transfer_dequeue(xfer);
1689 		USB_BUS_UNLOCK(bus);
1690 	}
1691 	/* clear "did_dma_delay" flag */
1692 	xfer->flags_int.did_dma_delay = 0;
1693 
1694 	/* clear "did_close" flag */
1695 	xfer->flags_int.did_close = 0;
1696 
1697 #if USB_HAVE_BUSDMA
1698 	/* clear "bdma_setup" flag */
1699 	xfer->flags_int.bdma_setup = 0;
1700 #endif
1701 	/* by default we cannot cancel any USB transfer immediately */
1702 	xfer->flags_int.can_cancel_immed = 0;
1703 
1704 	/* clear lengths and frame counts by default */
1705 	xfer->sumlen = 0;
1706 	xfer->actlen = 0;
1707 	xfer->aframes = 0;
1708 
1709 	/* clear any previous errors */
1710 	xfer->error = 0;
1711 
1712 	/* Check if the device is still alive */
1713 	if (info->udev->state < USB_STATE_POWERED) {
1714 		USB_BUS_LOCK(bus);
1715 		/*
1716 		 * Must return cancelled error code else
1717 		 * device drivers can hang.
1718 		 */
1719 		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1720 		USB_BUS_UNLOCK(bus);
1721 		return;
1722 	}
1723 
1724 	/* sanity check */
1725 	if (xfer->nframes == 0) {
1726 		if (xfer->flags.stall_pipe) {
1727 			/*
1728 			 * Special case - want to stall without transferring
1729 			 * any data:
1730 			 */
1731 			DPRINTF("xfer=%p nframes=0: stall "
1732 			    "or clear stall!\n", xfer);
1733 			USB_BUS_LOCK(bus);
1734 			xfer->flags_int.can_cancel_immed = 1;
1735 			/* start the transfer */
1736 			usb_command_wrapper(&xfer->endpoint->
1737 			    endpoint_q[xfer->stream_id], xfer);
1738 			USB_BUS_UNLOCK(bus);
1739 			return;
1740 		}
1741 		USB_BUS_LOCK(bus);
1742 		usbd_transfer_done(xfer, USB_ERR_INVAL);
1743 		USB_BUS_UNLOCK(bus);
1744 		return;
1745 	}
1746 	/* compute some variables */
1747 
1748 	for (x = 0; x != xfer->nframes; x++) {
1749 		/* make a copy of the frlenghts[] */
1750 		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1751 		/* compute total transfer length */
1752 		xfer->sumlen += xfer->frlengths[x];
1753 		if (xfer->sumlen < xfer->frlengths[x]) {
1754 			/* length wrapped around */
1755 			USB_BUS_LOCK(bus);
1756 			usbd_transfer_done(xfer, USB_ERR_INVAL);
1757 			USB_BUS_UNLOCK(bus);
1758 			return;
1759 		}
1760 	}
1761 
1762 	/* clear some internal flags */
1763 
1764 	xfer->flags_int.short_xfer_ok = 0;
1765 	xfer->flags_int.short_frames_ok = 0;
1766 
1767 	/* check if this is a control transfer */
1768 
1769 	if (xfer->flags_int.control_xfr) {
1770 
1771 		if (usbd_setup_ctrl_transfer(xfer)) {
1772 			USB_BUS_LOCK(bus);
1773 			usbd_transfer_done(xfer, USB_ERR_STALLED);
1774 			USB_BUS_UNLOCK(bus);
1775 			return;
1776 		}
1777 	}
1778 	/*
1779 	 * Setup filtered version of some transfer flags,
1780 	 * in case of data read direction
1781 	 */
1782 	if (USB_GET_DATA_ISREAD(xfer)) {
1783 
1784 		if (xfer->flags.short_frames_ok) {
1785 			xfer->flags_int.short_xfer_ok = 1;
1786 			xfer->flags_int.short_frames_ok = 1;
1787 		} else if (xfer->flags.short_xfer_ok) {
1788 			xfer->flags_int.short_xfer_ok = 1;
1789 
1790 			/* check for control transfer */
1791 			if (xfer->flags_int.control_xfr) {
1792 				/*
1793 				 * 1) Control transfers do not support
1794 				 * reception of multiple short USB
1795 				 * frames in host mode and device side
1796 				 * mode, with exception of:
1797 				 *
1798 				 * 2) Due to sometimes buggy device
1799 				 * side firmware we need to do a
1800 				 * STATUS stage in case of short
1801 				 * control transfers in USB host mode.
1802 				 * The STATUS stage then becomes the
1803 				 * "alt_next" to the DATA stage.
1804 				 */
1805 				xfer->flags_int.short_frames_ok = 1;
1806 			}
1807 		}
1808 	}
1809 	/*
1810 	 * Check if BUS-DMA support is enabled and try to load virtual
1811 	 * buffers into DMA, if any:
1812 	 */
1813 #if USB_HAVE_BUSDMA
1814 	if (xfer->flags_int.bdma_enable) {
1815 		/* insert the USB transfer last in the BUS-DMA queue */
1816 		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1817 		return;
1818 	}
1819 #endif
1820 	/*
1821 	 * Enter the USB transfer into the Host Controller or
1822 	 * Device Controller schedule:
1823 	 */
1824 	usbd_pipe_enter(xfer);
1825 }
1826 
1827 /*------------------------------------------------------------------------*
1828  *	usbd_pipe_enter - factored out code
1829  *------------------------------------------------------------------------*/
1830 void
1831 usbd_pipe_enter(struct usb_xfer *xfer)
1832 {
1833 	struct usb_endpoint *ep;
1834 
1835 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1836 
1837 	USB_BUS_LOCK(xfer->xroot->bus);
1838 
1839 	ep = xfer->endpoint;
1840 
1841 	DPRINTF("enter\n");
1842 
1843 	/* the transfer can now be cancelled */
1844 	xfer->flags_int.can_cancel_immed = 1;
1845 
1846 	/* enter the transfer */
1847 	(ep->methods->enter) (xfer);
1848 
1849 	/* check for transfer error */
1850 	if (xfer->error) {
1851 		/* some error has happened */
1852 		usbd_transfer_done(xfer, 0);
1853 		USB_BUS_UNLOCK(xfer->xroot->bus);
1854 		return;
1855 	}
1856 
1857 	/* start the transfer */
1858 	usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1859 	USB_BUS_UNLOCK(xfer->xroot->bus);
1860 }
1861 
1862 /*------------------------------------------------------------------------*
1863  *	usbd_transfer_start - start an USB transfer
1864  *
1865  * NOTE: Calling this function more than one time will only
1866  *       result in a single transfer start, until the USB transfer
1867  *       completes.
1868  *------------------------------------------------------------------------*/
1869 void
1870 usbd_transfer_start(struct usb_xfer *xfer)
1871 {
1872 	if (xfer == NULL) {
1873 		/* transfer is gone */
1874 		return;
1875 	}
1876 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1877 
1878 	/* mark the USB transfer started */
1879 
1880 	if (!xfer->flags_int.started) {
1881 		/* lock the BUS lock to avoid races updating flags_int */
1882 		USB_BUS_LOCK(xfer->xroot->bus);
1883 		xfer->flags_int.started = 1;
1884 		USB_BUS_UNLOCK(xfer->xroot->bus);
1885 	}
1886 	/* check if the USB transfer callback is already transferring */
1887 
1888 	if (xfer->flags_int.transferring) {
1889 		return;
1890 	}
1891 	USB_BUS_LOCK(xfer->xroot->bus);
1892 	/* call the USB transfer callback */
1893 	usbd_callback_ss_done_defer(xfer);
1894 	USB_BUS_UNLOCK(xfer->xroot->bus);
1895 }
1896 
1897 /*------------------------------------------------------------------------*
1898  *	usbd_transfer_stop - stop an USB transfer
1899  *
1900  * NOTE: Calling this function more than one time will only
1901  *       result in a single transfer stop.
1902  * NOTE: When this function returns it is not safe to free nor
1903  *       reuse any DMA buffers. See "usbd_transfer_drain()".
1904  *------------------------------------------------------------------------*/
1905 void
1906 usbd_transfer_stop(struct usb_xfer *xfer)
1907 {
1908 	struct usb_endpoint *ep;
1909 
1910 	if (xfer == NULL) {
1911 		/* transfer is gone */
1912 		return;
1913 	}
1914 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1915 
1916 	/* check if the USB transfer was ever opened */
1917 
1918 	if (!xfer->flags_int.open) {
1919 		if (xfer->flags_int.started) {
1920 			/* nothing to do except clearing the "started" flag */
1921 			/* lock the BUS lock to avoid races updating flags_int */
1922 			USB_BUS_LOCK(xfer->xroot->bus);
1923 			xfer->flags_int.started = 0;
1924 			USB_BUS_UNLOCK(xfer->xroot->bus);
1925 		}
1926 		return;
1927 	}
1928 	/* try to stop the current USB transfer */
1929 
1930 	USB_BUS_LOCK(xfer->xroot->bus);
1931 	/* override any previous error */
1932 	xfer->error = USB_ERR_CANCELLED;
1933 
1934 	/*
1935 	 * Clear "open" and "started" when both private and USB lock
1936 	 * is locked so that we don't get a race updating "flags_int"
1937 	 */
1938 	xfer->flags_int.open = 0;
1939 	xfer->flags_int.started = 0;
1940 
1941 	/*
1942 	 * Check if we can cancel the USB transfer immediately.
1943 	 */
1944 	if (xfer->flags_int.transferring) {
1945 		if (xfer->flags_int.can_cancel_immed &&
1946 		    (!xfer->flags_int.did_close)) {
1947 			DPRINTF("close\n");
1948 			/*
1949 			 * The following will lead to an USB_ERR_CANCELLED
1950 			 * error code being passed to the USB callback.
1951 			 */
1952 			(xfer->endpoint->methods->close) (xfer);
1953 			/* only close once */
1954 			xfer->flags_int.did_close = 1;
1955 		} else {
1956 			/* need to wait for the next done callback */
1957 		}
1958 	} else {
1959 		DPRINTF("close\n");
1960 
1961 		/* close here and now */
1962 		(xfer->endpoint->methods->close) (xfer);
1963 
1964 		/*
1965 		 * Any additional DMA delay is done by
1966 		 * "usbd_transfer_unsetup()".
1967 		 */
1968 
1969 		/*
1970 		 * Special case. Check if we need to restart a blocked
1971 		 * endpoint.
1972 		 */
1973 		ep = xfer->endpoint;
1974 
1975 		/*
1976 		 * If the current USB transfer is completing we need
1977 		 * to start the next one:
1978 		 */
1979 		if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1980 			usb_command_wrapper(
1981 			    &ep->endpoint_q[xfer->stream_id], NULL);
1982 		}
1983 	}
1984 
1985 	USB_BUS_UNLOCK(xfer->xroot->bus);
1986 }
1987 
1988 /*------------------------------------------------------------------------*
1989  *	usbd_transfer_pending
1990  *
1991  * This function will check if an USB transfer is pending which is a
1992  * little bit complicated!
1993  * Return values:
1994  * 0: Not pending
1995  * 1: Pending: The USB transfer will receive a callback in the future.
1996  *------------------------------------------------------------------------*/
1997 uint8_t
1998 usbd_transfer_pending(struct usb_xfer *xfer)
1999 {
2000 	struct usb_xfer_root *info;
2001 	struct usb_xfer_queue *pq;
2002 
2003 	if (xfer == NULL) {
2004 		/* transfer is gone */
2005 		return (0);
2006 	}
2007 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2008 
2009 	if (xfer->flags_int.transferring) {
2010 		/* trivial case */
2011 		return (1);
2012 	}
2013 	USB_BUS_LOCK(xfer->xroot->bus);
2014 	if (xfer->wait_queue) {
2015 		/* we are waiting on a queue somewhere */
2016 		USB_BUS_UNLOCK(xfer->xroot->bus);
2017 		return (1);
2018 	}
2019 	info = xfer->xroot;
2020 	pq = &info->done_q;
2021 
2022 	if (pq->curr == xfer) {
2023 		/* we are currently scheduled for callback */
2024 		USB_BUS_UNLOCK(xfer->xroot->bus);
2025 		return (1);
2026 	}
2027 	/* we are not pending */
2028 	USB_BUS_UNLOCK(xfer->xroot->bus);
2029 	return (0);
2030 }
2031 
2032 /*------------------------------------------------------------------------*
2033  *	usbd_transfer_drain
2034  *
2035  * This function will stop the USB transfer and wait for any
2036  * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2037  * are loaded into DMA can safely be freed or reused after that this
2038  * function has returned.
2039  *------------------------------------------------------------------------*/
2040 void
2041 usbd_transfer_drain(struct usb_xfer *xfer)
2042 {
2043 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2044 	    "usbd_transfer_drain can sleep!");
2045 
2046 	if (xfer == NULL) {
2047 		/* transfer is gone */
2048 		return;
2049 	}
2050 	if (xfer->xroot->xfer_mtx != &Giant) {
2051 		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2052 	}
2053 	USB_XFER_LOCK(xfer);
2054 
2055 	usbd_transfer_stop(xfer);
2056 
2057 	while (usbd_transfer_pending(xfer) ||
2058 	    xfer->flags_int.doing_callback) {
2059 
2060 		/*
2061 		 * It is allowed that the callback can drop its
2062 		 * transfer mutex. In that case checking only
2063 		 * "usbd_transfer_pending()" is not enough to tell if
2064 		 * the USB transfer is fully drained. We also need to
2065 		 * check the internal "doing_callback" flag.
2066 		 */
2067 		xfer->flags_int.draining = 1;
2068 
2069 		/*
2070 		 * Wait until the current outstanding USB
2071 		 * transfer is complete !
2072 		 */
2073 		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2074 	}
2075 	USB_XFER_UNLOCK(xfer);
2076 }
2077 
2078 struct usb_page_cache *
2079 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2080 {
2081 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2082 
2083 	return (&xfer->frbuffers[frindex]);
2084 }
2085 
2086 void *
2087 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2088 {
2089 	struct usb_page_search page_info;
2090 
2091 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2092 
2093 	usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2094 	return (page_info.buffer);
2095 }
2096 
2097 /*------------------------------------------------------------------------*
2098  *	usbd_xfer_get_fps_shift
2099  *
2100  * The following function is only useful for isochronous transfers. It
2101  * returns how many times the frame execution rate has been shifted
2102  * down.
2103  *
2104  * Return value:
2105  * Success: 0..3
2106  * Failure: 0
2107  *------------------------------------------------------------------------*/
2108 uint8_t
2109 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2110 {
2111 	return (xfer->fps_shift);
2112 }
2113 
2114 usb_frlength_t
2115 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2116 {
2117 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2118 
2119 	return (xfer->frlengths[frindex]);
2120 }
2121 
2122 /*------------------------------------------------------------------------*
2123  *	usbd_xfer_set_frame_data
2124  *
2125  * This function sets the pointer of the buffer that should
2126  * loaded directly into DMA for the given USB frame. Passing "ptr"
2127  * equal to NULL while the corresponding "frlength" is greater
2128  * than zero gives undefined results!
2129  *------------------------------------------------------------------------*/
2130 void
2131 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2132     void *ptr, usb_frlength_t len)
2133 {
2134 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2135 
2136 	/* set virtual address to load and length */
2137 	xfer->frbuffers[frindex].buffer = ptr;
2138 	usbd_xfer_set_frame_len(xfer, frindex, len);
2139 }
2140 
2141 void
2142 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2143     void **ptr, int *len)
2144 {
2145 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2146 
2147 	if (ptr != NULL)
2148 		*ptr = xfer->frbuffers[frindex].buffer;
2149 	if (len != NULL)
2150 		*len = xfer->frlengths[frindex];
2151 }
2152 
2153 /*------------------------------------------------------------------------*
2154  *	usbd_xfer_old_frame_length
2155  *
2156  * This function returns the framelength of the given frame at the
2157  * time the transfer was submitted. This function can be used to
2158  * compute the starting data pointer of the next isochronous frame
2159  * when an isochronous transfer has completed.
2160  *------------------------------------------------------------------------*/
2161 usb_frlength_t
2162 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2163 {
2164 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2165 
2166 	return (xfer->frlengths[frindex + xfer->max_frame_count]);
2167 }
2168 
2169 void
2170 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2171     int *nframes)
2172 {
2173 	if (actlen != NULL)
2174 		*actlen = xfer->actlen;
2175 	if (sumlen != NULL)
2176 		*sumlen = xfer->sumlen;
2177 	if (aframes != NULL)
2178 		*aframes = xfer->aframes;
2179 	if (nframes != NULL)
2180 		*nframes = xfer->nframes;
2181 }
2182 
2183 /*------------------------------------------------------------------------*
2184  *	usbd_xfer_set_frame_offset
2185  *
2186  * This function sets the frame data buffer offset relative to the beginning
2187  * of the USB DMA buffer allocated for this USB transfer.
2188  *------------------------------------------------------------------------*/
2189 void
2190 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2191     usb_frcount_t frindex)
2192 {
2193 	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2194 	    "when the USB buffer is external\n"));
2195 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2196 
2197 	/* set virtual address to load */
2198 	xfer->frbuffers[frindex].buffer =
2199 	    USB_ADD_BYTES(xfer->local_buffer, offset);
2200 }
2201 
2202 void
2203 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2204 {
2205 	xfer->interval = i;
2206 }
2207 
2208 void
2209 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2210 {
2211 	xfer->timeout = t;
2212 }
2213 
2214 void
2215 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2216 {
2217 	xfer->nframes = n;
2218 }
2219 
2220 usb_frcount_t
2221 usbd_xfer_max_frames(struct usb_xfer *xfer)
2222 {
2223 	return (xfer->max_frame_count);
2224 }
2225 
2226 usb_frlength_t
2227 usbd_xfer_max_len(struct usb_xfer *xfer)
2228 {
2229 	return (xfer->max_data_length);
2230 }
2231 
2232 usb_frlength_t
2233 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2234 {
2235 	return (xfer->max_frame_size);
2236 }
2237 
2238 void
2239 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2240     usb_frlength_t len)
2241 {
2242 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2243 
2244 	xfer->frlengths[frindex] = len;
2245 }
2246 
2247 /*------------------------------------------------------------------------*
2248  *	usb_callback_proc - factored out code
2249  *
2250  * This function performs USB callbacks.
2251  *------------------------------------------------------------------------*/
2252 static void
2253 usb_callback_proc(struct usb_proc_msg *_pm)
2254 {
2255 	struct usb_done_msg *pm = (void *)_pm;
2256 	struct usb_xfer_root *info = pm->xroot;
2257 
2258 	/* Change locking order */
2259 	USB_BUS_UNLOCK(info->bus);
2260 
2261 	/*
2262 	 * We exploit the fact that the mutex is the same for all
2263 	 * callbacks that will be called from this thread:
2264 	 */
2265 	mtx_lock(info->xfer_mtx);
2266 	USB_BUS_LOCK(info->bus);
2267 
2268 	/* Continue where we lost track */
2269 	usb_command_wrapper(&info->done_q,
2270 	    info->done_q.curr);
2271 
2272 	mtx_unlock(info->xfer_mtx);
2273 }
2274 
2275 /*------------------------------------------------------------------------*
2276  *	usbd_callback_ss_done_defer
2277  *
2278  * This function will defer the start, stop and done callback to the
2279  * correct thread.
2280  *------------------------------------------------------------------------*/
2281 static void
2282 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2283 {
2284 	struct usb_xfer_root *info = xfer->xroot;
2285 	struct usb_xfer_queue *pq = &info->done_q;
2286 
2287 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2288 
2289 	if (pq->curr != xfer) {
2290 		usbd_transfer_enqueue(pq, xfer);
2291 	}
2292 	if (!pq->recurse_1) {
2293 
2294 		/*
2295 	         * We have to postpone the callback due to the fact we
2296 	         * will have a Lock Order Reversal, LOR, if we try to
2297 	         * proceed !
2298 	         */
2299 		(void) usb_proc_msignal(info->done_p,
2300 		    &info->done_m[0], &info->done_m[1]);
2301 	} else {
2302 		/* clear second recurse flag */
2303 		pq->recurse_2 = 0;
2304 	}
2305 	return;
2306 
2307 }
2308 
2309 /*------------------------------------------------------------------------*
2310  *	usbd_callback_wrapper
2311  *
2312  * This is a wrapper for USB callbacks. This wrapper does some
2313  * auto-magic things like figuring out if we can call the callback
2314  * directly from the current context or if we need to wakeup the
2315  * interrupt process.
2316  *------------------------------------------------------------------------*/
2317 static void
2318 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2319 {
2320 	struct usb_xfer *xfer = pq->curr;
2321 	struct usb_xfer_root *info = xfer->xroot;
2322 
2323 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2324 	if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2325 	    SCHEDULER_STOPPED() == 0) {
2326 		/*
2327 	       	 * Cases that end up here:
2328 		 *
2329 		 * 5) HW interrupt done callback or other source.
2330 		 * 6) HW completed transfer during callback
2331 		 */
2332 		DPRINTFN(3, "case 5 and 6\n");
2333 
2334 		/*
2335 	         * We have to postpone the callback due to the fact we
2336 	         * will have a Lock Order Reversal, LOR, if we try to
2337 	         * proceed!
2338 		 *
2339 		 * Postponing the callback also ensures that other USB
2340 		 * transfer queues get a chance.
2341 	         */
2342 		(void) usb_proc_msignal(info->done_p,
2343 		    &info->done_m[0], &info->done_m[1]);
2344 		return;
2345 	}
2346 	/*
2347 	 * Cases that end up here:
2348 	 *
2349 	 * 1) We are starting a transfer
2350 	 * 2) We are prematurely calling back a transfer
2351 	 * 3) We are stopping a transfer
2352 	 * 4) We are doing an ordinary callback
2353 	 */
2354 	DPRINTFN(3, "case 1-4\n");
2355 	/* get next USB transfer in the queue */
2356 	info->done_q.curr = NULL;
2357 
2358 	/* set flag in case of drain */
2359 	xfer->flags_int.doing_callback = 1;
2360 
2361 	USB_BUS_UNLOCK(info->bus);
2362 	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2363 
2364 	/* set correct USB state for callback */
2365 	if (!xfer->flags_int.transferring) {
2366 		xfer->usb_state = USB_ST_SETUP;
2367 		if (!xfer->flags_int.started) {
2368 			/* we got stopped before we even got started */
2369 			USB_BUS_LOCK(info->bus);
2370 			goto done;
2371 		}
2372 	} else {
2373 
2374 		if (usbd_callback_wrapper_sub(xfer)) {
2375 			/* the callback has been deferred */
2376 			USB_BUS_LOCK(info->bus);
2377 			goto done;
2378 		}
2379 #if USB_HAVE_POWERD
2380 		/* decrement power reference */
2381 		usbd_transfer_power_ref(xfer, -1);
2382 #endif
2383 		xfer->flags_int.transferring = 0;
2384 
2385 		if (xfer->error) {
2386 			xfer->usb_state = USB_ST_ERROR;
2387 		} else {
2388 			/* set transferred state */
2389 			xfer->usb_state = USB_ST_TRANSFERRED;
2390 #if USB_HAVE_BUSDMA
2391 			/* sync DMA memory, if any */
2392 			if (xfer->flags_int.bdma_enable &&
2393 			    (!xfer->flags_int.bdma_no_post_sync)) {
2394 				usb_bdma_post_sync(xfer);
2395 			}
2396 #endif
2397 		}
2398 	}
2399 
2400 #if USB_HAVE_PF
2401 	if (xfer->usb_state != USB_ST_SETUP) {
2402 		USB_BUS_LOCK(info->bus);
2403 		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2404 		USB_BUS_UNLOCK(info->bus);
2405 	}
2406 #endif
2407 	/* call processing routine */
2408 	(xfer->callback) (xfer, xfer->error);
2409 
2410 	/* pickup the USB mutex again */
2411 	USB_BUS_LOCK(info->bus);
2412 
2413 	/*
2414 	 * Check if we got started after that we got cancelled, but
2415 	 * before we managed to do the callback.
2416 	 */
2417 	if ((!xfer->flags_int.open) &&
2418 	    (xfer->flags_int.started) &&
2419 	    (xfer->usb_state == USB_ST_ERROR)) {
2420 		/* clear flag in case of drain */
2421 		xfer->flags_int.doing_callback = 0;
2422 		/* try to loop, but not recursivly */
2423 		usb_command_wrapper(&info->done_q, xfer);
2424 		return;
2425 	}
2426 
2427 done:
2428 	/* clear flag in case of drain */
2429 	xfer->flags_int.doing_callback = 0;
2430 
2431 	/*
2432 	 * Check if we are draining.
2433 	 */
2434 	if (xfer->flags_int.draining &&
2435 	    (!xfer->flags_int.transferring)) {
2436 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2437 		xfer->flags_int.draining = 0;
2438 		cv_broadcast(&info->cv_drain);
2439 	}
2440 
2441 	/* do the next callback, if any */
2442 	usb_command_wrapper(&info->done_q,
2443 	    info->done_q.curr);
2444 }
2445 
2446 /*------------------------------------------------------------------------*
2447  *	usb_dma_delay_done_cb
2448  *
2449  * This function is called when the DMA delay has been exectuded, and
2450  * will make sure that the callback is called to complete the USB
2451  * transfer. This code path is ususally only used when there is an USB
2452  * error like USB_ERR_CANCELLED.
2453  *------------------------------------------------------------------------*/
2454 void
2455 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2456 {
2457 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2458 
2459 	DPRINTFN(3, "Completed %p\n", xfer);
2460 
2461 	/* queue callback for execution, again */
2462 	usbd_transfer_done(xfer, 0);
2463 }
2464 
2465 /*------------------------------------------------------------------------*
2466  *	usbd_transfer_dequeue
2467  *
2468  *  - This function is used to remove an USB transfer from a USB
2469  *  transfer queue.
2470  *
2471  *  - This function can be called multiple times in a row.
2472  *------------------------------------------------------------------------*/
2473 void
2474 usbd_transfer_dequeue(struct usb_xfer *xfer)
2475 {
2476 	struct usb_xfer_queue *pq;
2477 
2478 	pq = xfer->wait_queue;
2479 	if (pq) {
2480 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2481 		xfer->wait_queue = NULL;
2482 	}
2483 }
2484 
2485 /*------------------------------------------------------------------------*
2486  *	usbd_transfer_enqueue
2487  *
2488  *  - This function is used to insert an USB transfer into a USB *
2489  *  transfer queue.
2490  *
2491  *  - This function can be called multiple times in a row.
2492  *------------------------------------------------------------------------*/
2493 void
2494 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2495 {
2496 	/*
2497 	 * Insert the USB transfer into the queue, if it is not
2498 	 * already on a USB transfer queue:
2499 	 */
2500 	if (xfer->wait_queue == NULL) {
2501 		xfer->wait_queue = pq;
2502 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2503 	}
2504 }
2505 
2506 /*------------------------------------------------------------------------*
2507  *	usbd_transfer_done
2508  *
2509  *  - This function is used to remove an USB transfer from the busdma,
2510  *  pipe or interrupt queue.
2511  *
2512  *  - This function is used to queue the USB transfer on the done
2513  *  queue.
2514  *
2515  *  - This function is used to stop any USB transfer timeouts.
2516  *------------------------------------------------------------------------*/
2517 void
2518 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2519 {
2520 	struct usb_xfer_root *info = xfer->xroot;
2521 
2522 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2523 
2524 	DPRINTF("err=%s\n", usbd_errstr(error));
2525 
2526 	/*
2527 	 * If we are not transferring then just return.
2528 	 * This can happen during transfer cancel.
2529 	 */
2530 	if (!xfer->flags_int.transferring) {
2531 		DPRINTF("not transferring\n");
2532 		/* end of control transfer, if any */
2533 		xfer->flags_int.control_act = 0;
2534 		return;
2535 	}
2536 	/* only set transfer error, if not already set */
2537 	if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2538 		xfer->error = error;
2539 
2540 	/* stop any callouts */
2541 	usb_callout_stop(&xfer->timeout_handle);
2542 
2543 	/*
2544 	 * If we are waiting on a queue, just remove the USB transfer
2545 	 * from the queue, if any. We should have the required locks
2546 	 * locked to do the remove when this function is called.
2547 	 */
2548 	usbd_transfer_dequeue(xfer);
2549 
2550 #if USB_HAVE_BUSDMA
2551 	if (mtx_owned(info->xfer_mtx)) {
2552 		struct usb_xfer_queue *pq;
2553 
2554 		/*
2555 		 * If the private USB lock is not locked, then we assume
2556 		 * that the BUS-DMA load stage has been passed:
2557 		 */
2558 		pq = &info->dma_q;
2559 
2560 		if (pq->curr == xfer) {
2561 			/* start the next BUS-DMA load, if any */
2562 			usb_command_wrapper(pq, NULL);
2563 		}
2564 	}
2565 #endif
2566 	/* keep some statistics */
2567 	if (xfer->error) {
2568 		info->bus->stats_err.uds_requests
2569 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2570 	} else {
2571 		info->bus->stats_ok.uds_requests
2572 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2573 	}
2574 
2575 	/* call the USB transfer callback */
2576 	usbd_callback_ss_done_defer(xfer);
2577 }
2578 
2579 /*------------------------------------------------------------------------*
2580  *	usbd_transfer_start_cb
2581  *
2582  * This function is called to start the USB transfer when
2583  * "xfer->interval" is greater than zero, and and the endpoint type is
2584  * BULK or CONTROL.
2585  *------------------------------------------------------------------------*/
2586 static void
2587 usbd_transfer_start_cb(void *arg)
2588 {
2589 	struct usb_xfer *xfer = arg;
2590 	struct usb_endpoint *ep = xfer->endpoint;
2591 
2592 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2593 
2594 	DPRINTF("start\n");
2595 
2596 #if USB_HAVE_PF
2597 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2598 #endif
2599 
2600 	/* the transfer can now be cancelled */
2601 	xfer->flags_int.can_cancel_immed = 1;
2602 
2603 	/* start USB transfer, if no error */
2604 	if (xfer->error == 0)
2605 		(ep->methods->start) (xfer);
2606 
2607 	/* check for transfer error */
2608 	if (xfer->error) {
2609 		/* some error has happened */
2610 		usbd_transfer_done(xfer, 0);
2611 	}
2612 }
2613 
2614 /*------------------------------------------------------------------------*
2615  *	usbd_xfer_set_stall
2616  *
2617  * This function is used to set the stall flag outside the
2618  * callback. This function is NULL safe.
2619  *------------------------------------------------------------------------*/
2620 void
2621 usbd_xfer_set_stall(struct usb_xfer *xfer)
2622 {
2623 	if (xfer == NULL) {
2624 		/* tearing down */
2625 		return;
2626 	}
2627 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2628 
2629 	/* avoid any races by locking the USB mutex */
2630 	USB_BUS_LOCK(xfer->xroot->bus);
2631 	xfer->flags.stall_pipe = 1;
2632 	USB_BUS_UNLOCK(xfer->xroot->bus);
2633 }
2634 
2635 int
2636 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2637 {
2638 	return (xfer->endpoint->is_stalled);
2639 }
2640 
2641 /*------------------------------------------------------------------------*
2642  *	usbd_transfer_clear_stall
2643  *
2644  * This function is used to clear the stall flag outside the
2645  * callback. This function is NULL safe.
2646  *------------------------------------------------------------------------*/
2647 void
2648 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2649 {
2650 	if (xfer == NULL) {
2651 		/* tearing down */
2652 		return;
2653 	}
2654 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2655 
2656 	/* avoid any races by locking the USB mutex */
2657 	USB_BUS_LOCK(xfer->xroot->bus);
2658 
2659 	xfer->flags.stall_pipe = 0;
2660 
2661 	USB_BUS_UNLOCK(xfer->xroot->bus);
2662 }
2663 
2664 /*------------------------------------------------------------------------*
2665  *	usbd_pipe_start
2666  *
2667  * This function is used to add an USB transfer to the pipe transfer list.
2668  *------------------------------------------------------------------------*/
2669 void
2670 usbd_pipe_start(struct usb_xfer_queue *pq)
2671 {
2672 	struct usb_endpoint *ep;
2673 	struct usb_xfer *xfer;
2674 	uint8_t type;
2675 
2676 	xfer = pq->curr;
2677 	ep = xfer->endpoint;
2678 
2679 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2680 
2681 	/*
2682 	 * If the endpoint is already stalled we do nothing !
2683 	 */
2684 	if (ep->is_stalled) {
2685 		return;
2686 	}
2687 	/*
2688 	 * Check if we are supposed to stall the endpoint:
2689 	 */
2690 	if (xfer->flags.stall_pipe) {
2691 		struct usb_device *udev;
2692 		struct usb_xfer_root *info;
2693 
2694 		/* clear stall command */
2695 		xfer->flags.stall_pipe = 0;
2696 
2697 		/* get pointer to USB device */
2698 		info = xfer->xroot;
2699 		udev = info->udev;
2700 
2701 		/*
2702 		 * Only stall BULK and INTERRUPT endpoints.
2703 		 */
2704 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2705 		if ((type == UE_BULK) ||
2706 		    (type == UE_INTERRUPT)) {
2707 			uint8_t did_stall;
2708 
2709 			did_stall = 1;
2710 
2711 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2712 				(udev->bus->methods->set_stall) (
2713 				    udev, ep, &did_stall);
2714 			} else if (udev->ctrl_xfer[1]) {
2715 				info = udev->ctrl_xfer[1]->xroot;
2716 				usb_proc_msignal(
2717 				    USB_BUS_CS_PROC(info->bus),
2718 				    &udev->cs_msg[0], &udev->cs_msg[1]);
2719 			} else {
2720 				/* should not happen */
2721 				DPRINTFN(0, "No stall handler\n");
2722 			}
2723 			/*
2724 			 * Check if we should stall. Some USB hardware
2725 			 * handles set- and clear-stall in hardware.
2726 			 */
2727 			if (did_stall) {
2728 				/*
2729 				 * The transfer will be continued when
2730 				 * the clear-stall control endpoint
2731 				 * message is received.
2732 				 */
2733 				ep->is_stalled = 1;
2734 				return;
2735 			}
2736 		} else if (type == UE_ISOCHRONOUS) {
2737 
2738 			/*
2739 			 * Make sure any FIFO overflow or other FIFO
2740 			 * error conditions go away by resetting the
2741 			 * endpoint FIFO through the clear stall
2742 			 * method.
2743 			 */
2744 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2745 				(udev->bus->methods->clear_stall) (udev, ep);
2746 			}
2747 		}
2748 	}
2749 	/* Set or clear stall complete - special case */
2750 	if (xfer->nframes == 0) {
2751 		/* we are complete */
2752 		xfer->aframes = 0;
2753 		usbd_transfer_done(xfer, 0);
2754 		return;
2755 	}
2756 	/*
2757 	 * Handled cases:
2758 	 *
2759 	 * 1) Start the first transfer queued.
2760 	 *
2761 	 * 2) Re-start the current USB transfer.
2762 	 */
2763 	/*
2764 	 * Check if there should be any
2765 	 * pre transfer start delay:
2766 	 */
2767 	if (xfer->interval > 0) {
2768 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2769 		if ((type == UE_BULK) ||
2770 		    (type == UE_CONTROL)) {
2771 			usbd_transfer_timeout_ms(xfer,
2772 			    &usbd_transfer_start_cb,
2773 			    xfer->interval);
2774 			return;
2775 		}
2776 	}
2777 	DPRINTF("start\n");
2778 
2779 #if USB_HAVE_PF
2780 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2781 #endif
2782 	/* the transfer can now be cancelled */
2783 	xfer->flags_int.can_cancel_immed = 1;
2784 
2785 	/* start USB transfer, if no error */
2786 	if (xfer->error == 0)
2787 		(ep->methods->start) (xfer);
2788 
2789 	/* check for transfer error */
2790 	if (xfer->error) {
2791 		/* some error has happened */
2792 		usbd_transfer_done(xfer, 0);
2793 	}
2794 }
2795 
2796 /*------------------------------------------------------------------------*
2797  *	usbd_transfer_timeout_ms
2798  *
2799  * This function is used to setup a timeout on the given USB
2800  * transfer. If the timeout has been deferred the callback given by
2801  * "cb" will get called after "ms" milliseconds.
2802  *------------------------------------------------------------------------*/
2803 void
2804 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2805     void (*cb) (void *arg), usb_timeout_t ms)
2806 {
2807 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2808 
2809 	/* defer delay */
2810 	usb_callout_reset(&xfer->timeout_handle,
2811 	    USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2812 }
2813 
2814 /*------------------------------------------------------------------------*
2815  *	usbd_callback_wrapper_sub
2816  *
2817  *  - This function will update variables in an USB transfer after
2818  *  that the USB transfer is complete.
2819  *
2820  *  - This function is used to start the next USB transfer on the
2821  *  ep transfer queue, if any.
2822  *
2823  * NOTE: In some special cases the USB transfer will not be removed from
2824  * the pipe queue, but remain first. To enforce USB transfer removal call
2825  * this function passing the error code "USB_ERR_CANCELLED".
2826  *
2827  * Return values:
2828  * 0: Success.
2829  * Else: The callback has been deferred.
2830  *------------------------------------------------------------------------*/
2831 static uint8_t
2832 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2833 {
2834 	struct usb_endpoint *ep;
2835 	struct usb_bus *bus;
2836 	usb_frcount_t x;
2837 
2838 	bus = xfer->xroot->bus;
2839 
2840 	if ((!xfer->flags_int.open) &&
2841 	    (!xfer->flags_int.did_close)) {
2842 		DPRINTF("close\n");
2843 		USB_BUS_LOCK(bus);
2844 		(xfer->endpoint->methods->close) (xfer);
2845 		USB_BUS_UNLOCK(bus);
2846 		/* only close once */
2847 		xfer->flags_int.did_close = 1;
2848 		return (1);		/* wait for new callback */
2849 	}
2850 	/*
2851 	 * If we have a non-hardware induced error we
2852 	 * need to do the DMA delay!
2853 	 */
2854 	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2855 	    (xfer->error == USB_ERR_CANCELLED ||
2856 	    xfer->error == USB_ERR_TIMEOUT ||
2857 	    bus->methods->start_dma_delay != NULL)) {
2858 
2859 		usb_timeout_t temp;
2860 
2861 		/* only delay once */
2862 		xfer->flags_int.did_dma_delay = 1;
2863 
2864 		/* we can not cancel this delay */
2865 		xfer->flags_int.can_cancel_immed = 0;
2866 
2867 		temp = usbd_get_dma_delay(xfer->xroot->udev);
2868 
2869 		DPRINTFN(3, "DMA delay, %u ms, "
2870 		    "on %p\n", temp, xfer);
2871 
2872 		if (temp != 0) {
2873 			USB_BUS_LOCK(bus);
2874 			/*
2875 			 * Some hardware solutions have dedicated
2876 			 * events when it is safe to free DMA'ed
2877 			 * memory. For the other hardware platforms we
2878 			 * use a static delay.
2879 			 */
2880 			if (bus->methods->start_dma_delay != NULL) {
2881 				(bus->methods->start_dma_delay) (xfer);
2882 			} else {
2883 				usbd_transfer_timeout_ms(xfer,
2884 				    (void (*)(void *))&usb_dma_delay_done_cb,
2885 				    temp);
2886 			}
2887 			USB_BUS_UNLOCK(bus);
2888 			return (1);	/* wait for new callback */
2889 		}
2890 	}
2891 	/* check actual number of frames */
2892 	if (xfer->aframes > xfer->nframes) {
2893 		if (xfer->error == 0) {
2894 			panic("%s: actual number of frames, %d, is "
2895 			    "greater than initial number of frames, %d\n",
2896 			    __FUNCTION__, xfer->aframes, xfer->nframes);
2897 		} else {
2898 			/* just set some valid value */
2899 			xfer->aframes = xfer->nframes;
2900 		}
2901 	}
2902 	/* compute actual length */
2903 	xfer->actlen = 0;
2904 
2905 	for (x = 0; x != xfer->aframes; x++) {
2906 		xfer->actlen += xfer->frlengths[x];
2907 	}
2908 
2909 	/*
2910 	 * Frames that were not transferred get zero actual length in
2911 	 * case the USB device driver does not check the actual number
2912 	 * of frames transferred, "xfer->aframes":
2913 	 */
2914 	for (; x < xfer->nframes; x++) {
2915 		usbd_xfer_set_frame_len(xfer, x, 0);
2916 	}
2917 
2918 	/* check actual length */
2919 	if (xfer->actlen > xfer->sumlen) {
2920 		if (xfer->error == 0) {
2921 			panic("%s: actual length, %d, is greater than "
2922 			    "initial length, %d\n",
2923 			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2924 		} else {
2925 			/* just set some valid value */
2926 			xfer->actlen = xfer->sumlen;
2927 		}
2928 	}
2929 	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2930 	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2931 	    xfer->aframes, xfer->nframes);
2932 
2933 	if (xfer->error) {
2934 		/* end of control transfer, if any */
2935 		xfer->flags_int.control_act = 0;
2936 
2937 #if USB_HAVE_TT_SUPPORT
2938 		switch (xfer->error) {
2939 		case USB_ERR_NORMAL_COMPLETION:
2940 		case USB_ERR_SHORT_XFER:
2941 		case USB_ERR_STALLED:
2942 		case USB_ERR_CANCELLED:
2943 			/* nothing to do */
2944 			break;
2945 		default:
2946 			/* try to reset the TT, if any */
2947 			USB_BUS_LOCK(bus);
2948 			uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2949 			USB_BUS_UNLOCK(bus);
2950 			break;
2951 		}
2952 #endif
2953 		/* check if we should block the execution queue */
2954 		if ((xfer->error != USB_ERR_CANCELLED) &&
2955 		    (xfer->flags.pipe_bof)) {
2956 			DPRINTFN(2, "xfer=%p: Block On Failure "
2957 			    "on endpoint=%p\n", xfer, xfer->endpoint);
2958 			goto done;
2959 		}
2960 	} else {
2961 		/* check for short transfers */
2962 		if (xfer->actlen < xfer->sumlen) {
2963 
2964 			/* end of control transfer, if any */
2965 			xfer->flags_int.control_act = 0;
2966 
2967 			if (!xfer->flags_int.short_xfer_ok) {
2968 				xfer->error = USB_ERR_SHORT_XFER;
2969 				if (xfer->flags.pipe_bof) {
2970 					DPRINTFN(2, "xfer=%p: Block On Failure on "
2971 					    "Short Transfer on endpoint %p.\n",
2972 					    xfer, xfer->endpoint);
2973 					goto done;
2974 				}
2975 			}
2976 		} else {
2977 			/*
2978 			 * Check if we are in the middle of a
2979 			 * control transfer:
2980 			 */
2981 			if (xfer->flags_int.control_act) {
2982 				DPRINTFN(5, "xfer=%p: Control transfer "
2983 				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2984 				goto done;
2985 			}
2986 		}
2987 	}
2988 
2989 	ep = xfer->endpoint;
2990 
2991 	/*
2992 	 * If the current USB transfer is completing we need to start the
2993 	 * next one:
2994 	 */
2995 	USB_BUS_LOCK(bus);
2996 	if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2997 		usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2998 
2999 		if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
3000 		    TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3001 			/* there is another USB transfer waiting */
3002 		} else {
3003 			/* this is the last USB transfer */
3004 			/* clear isochronous sync flag */
3005 			xfer->endpoint->is_synced = 0;
3006 		}
3007 	}
3008 	USB_BUS_UNLOCK(bus);
3009 done:
3010 	return (0);
3011 }
3012 
3013 /*------------------------------------------------------------------------*
3014  *	usb_command_wrapper
3015  *
3016  * This function is used to execute commands non-recursivly on an USB
3017  * transfer.
3018  *------------------------------------------------------------------------*/
3019 void
3020 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3021 {
3022 	if (xfer) {
3023 		/*
3024 		 * If the transfer is not already processing,
3025 		 * queue it!
3026 		 */
3027 		if (pq->curr != xfer) {
3028 			usbd_transfer_enqueue(pq, xfer);
3029 			if (pq->curr != NULL) {
3030 				/* something is already processing */
3031 				DPRINTFN(6, "busy %p\n", pq->curr);
3032 				return;
3033 			}
3034 		}
3035 	} else {
3036 		/* Get next element in queue */
3037 		pq->curr = NULL;
3038 	}
3039 
3040 	if (!pq->recurse_1) {
3041 
3042 		/* clear third recurse flag */
3043 		pq->recurse_3 = 0;
3044 
3045 		do {
3046 			/* set two first recurse flags */
3047 			pq->recurse_1 = 1;
3048 			pq->recurse_2 = 1;
3049 
3050 			if (pq->curr == NULL) {
3051 				xfer = TAILQ_FIRST(&pq->head);
3052 				if (xfer) {
3053 					TAILQ_REMOVE(&pq->head, xfer,
3054 					    wait_entry);
3055 					xfer->wait_queue = NULL;
3056 					pq->curr = xfer;
3057 				} else {
3058 					break;
3059 				}
3060 			}
3061 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3062 			(pq->command) (pq);
3063 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3064 
3065 			/*
3066 			 * Set third recurse flag to indicate
3067 			 * recursion happened:
3068 			 */
3069 			pq->recurse_3 = 1;
3070 
3071 		} while (!pq->recurse_2);
3072 
3073 		/* clear first recurse flag */
3074 		pq->recurse_1 = 0;
3075 
3076 	} else {
3077 		/* clear second recurse flag */
3078 		pq->recurse_2 = 0;
3079 	}
3080 }
3081 
3082 /*------------------------------------------------------------------------*
3083  *	usbd_ctrl_transfer_setup
3084  *
3085  * This function is used to setup the default USB control endpoint
3086  * transfer.
3087  *------------------------------------------------------------------------*/
3088 void
3089 usbd_ctrl_transfer_setup(struct usb_device *udev)
3090 {
3091 	struct usb_xfer *xfer;
3092 	uint8_t no_resetup;
3093 	uint8_t iface_index;
3094 
3095 	/* check for root HUB */
3096 	if (udev->parent_hub == NULL)
3097 		return;
3098 repeat:
3099 
3100 	xfer = udev->ctrl_xfer[0];
3101 	if (xfer) {
3102 		USB_XFER_LOCK(xfer);
3103 		no_resetup =
3104 		    ((xfer->address == udev->address) &&
3105 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3106 		    udev->ddesc.bMaxPacketSize));
3107 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3108 			if (no_resetup) {
3109 				/*
3110 				 * NOTE: checking "xfer->address" and
3111 				 * starting the USB transfer must be
3112 				 * atomic!
3113 				 */
3114 				usbd_transfer_start(xfer);
3115 			}
3116 		}
3117 		USB_XFER_UNLOCK(xfer);
3118 	} else {
3119 		no_resetup = 0;
3120 	}
3121 
3122 	if (no_resetup) {
3123 		/*
3124 	         * All parameters are exactly the same like before.
3125 	         * Just return.
3126 	         */
3127 		return;
3128 	}
3129 	/*
3130 	 * Update wMaxPacketSize for the default control endpoint:
3131 	 */
3132 	udev->ctrl_ep_desc.wMaxPacketSize[0] =
3133 	    udev->ddesc.bMaxPacketSize;
3134 
3135 	/*
3136 	 * Unsetup any existing USB transfer:
3137 	 */
3138 	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3139 
3140 	/*
3141 	 * Reset clear stall error counter.
3142 	 */
3143 	udev->clear_stall_errors = 0;
3144 
3145 	/*
3146 	 * Try to setup a new USB transfer for the
3147 	 * default control endpoint:
3148 	 */
3149 	iface_index = 0;
3150 	if (usbd_transfer_setup(udev, &iface_index,
3151 	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3152 	    &udev->device_mtx)) {
3153 		DPRINTFN(0, "could not setup default "
3154 		    "USB transfer\n");
3155 	} else {
3156 		goto repeat;
3157 	}
3158 }
3159 
3160 /*------------------------------------------------------------------------*
3161  *	usbd_clear_data_toggle - factored out code
3162  *
3163  * NOTE: the intention of this function is not to reset the hardware
3164  * data toggle.
3165  *------------------------------------------------------------------------*/
3166 void
3167 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3168 {
3169 	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3170 
3171 	/* check that we have a valid case */
3172 	if (udev->flags.usb_mode == USB_MODE_HOST &&
3173 	    udev->parent_hub != NULL &&
3174 	    udev->bus->methods->clear_stall != NULL &&
3175 	    ep->methods != NULL) {
3176 		(udev->bus->methods->clear_stall) (udev, ep);
3177 	}
3178 }
3179 
3180 /*------------------------------------------------------------------------*
3181  *	usbd_clear_data_toggle - factored out code
3182  *
3183  * NOTE: the intention of this function is not to reset the hardware
3184  * data toggle on the USB device side.
3185  *------------------------------------------------------------------------*/
3186 void
3187 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3188 {
3189 	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3190 
3191 	USB_BUS_LOCK(udev->bus);
3192 	ep->toggle_next = 0;
3193 	/* some hardware needs a callback to clear the data toggle */
3194 	usbd_clear_stall_locked(udev, ep);
3195 	USB_BUS_UNLOCK(udev->bus);
3196 }
3197 
3198 /*------------------------------------------------------------------------*
3199  *	usbd_clear_stall_callback - factored out clear stall callback
3200  *
3201  * Input parameters:
3202  *  xfer1: Clear Stall Control Transfer
3203  *  xfer2: Stalled USB Transfer
3204  *
3205  * This function is NULL safe.
3206  *
3207  * Return values:
3208  *   0: In progress
3209  *   Else: Finished
3210  *
3211  * Clear stall config example:
3212  *
3213  * static const struct usb_config my_clearstall =  {
3214  *	.type = UE_CONTROL,
3215  *	.endpoint = 0,
3216  *	.direction = UE_DIR_ANY,
3217  *	.interval = 50, //50 milliseconds
3218  *	.bufsize = sizeof(struct usb_device_request),
3219  *	.timeout = 1000, //1.000 seconds
3220  *	.callback = &my_clear_stall_callback, // **
3221  *	.usb_mode = USB_MODE_HOST,
3222  * };
3223  *
3224  * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3225  * passing the correct parameters.
3226  *------------------------------------------------------------------------*/
3227 uint8_t
3228 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3229     struct usb_xfer *xfer2)
3230 {
3231 	struct usb_device_request req;
3232 
3233 	if (xfer2 == NULL) {
3234 		/* looks like we are tearing down */
3235 		DPRINTF("NULL input parameter\n");
3236 		return (0);
3237 	}
3238 	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3239 	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3240 
3241 	switch (USB_GET_STATE(xfer1)) {
3242 	case USB_ST_SETUP:
3243 
3244 		/*
3245 		 * pre-clear the data toggle to DATA0 ("umass.c" and
3246 		 * "ata-usb.c" depends on this)
3247 		 */
3248 
3249 		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3250 
3251 		/* setup a clear-stall packet */
3252 
3253 		req.bmRequestType = UT_WRITE_ENDPOINT;
3254 		req.bRequest = UR_CLEAR_FEATURE;
3255 		USETW(req.wValue, UF_ENDPOINT_HALT);
3256 		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3257 		req.wIndex[1] = 0;
3258 		USETW(req.wLength, 0);
3259 
3260 		/*
3261 		 * "usbd_transfer_setup_sub()" will ensure that
3262 		 * we have sufficient room in the buffer for
3263 		 * the request structure!
3264 		 */
3265 
3266 		/* copy in the transfer */
3267 
3268 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3269 
3270 		/* set length */
3271 		xfer1->frlengths[0] = sizeof(req);
3272 		xfer1->nframes = 1;
3273 
3274 		usbd_transfer_submit(xfer1);
3275 		return (0);
3276 
3277 	case USB_ST_TRANSFERRED:
3278 		break;
3279 
3280 	default:			/* Error */
3281 		if (xfer1->error == USB_ERR_CANCELLED) {
3282 			return (0);
3283 		}
3284 		break;
3285 	}
3286 	return (1);			/* Clear Stall Finished */
3287 }
3288 
3289 /*------------------------------------------------------------------------*
3290  *	usbd_transfer_poll
3291  *
3292  * The following function gets called from the USB keyboard driver and
3293  * UMASS when the system has paniced.
3294  *
3295  * NOTE: It is currently not possible to resume normal operation on
3296  * the USB controller which has been polled, due to clearing of the
3297  * "up_dsleep" and "up_msleep" flags.
3298  *------------------------------------------------------------------------*/
3299 void
3300 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3301 {
3302 	struct usb_xfer *xfer;
3303 	struct usb_xfer_root *xroot;
3304 	struct usb_device *udev;
3305 	struct usb_proc_msg *pm;
3306 	uint16_t n;
3307 	uint16_t drop_bus;
3308 	uint16_t drop_xfer;
3309 
3310 	for (n = 0; n != max; n++) {
3311 		/* Extra checks to avoid panic */
3312 		xfer = ppxfer[n];
3313 		if (xfer == NULL)
3314 			continue;	/* no USB transfer */
3315 		xroot = xfer->xroot;
3316 		if (xroot == NULL)
3317 			continue;	/* no USB root */
3318 		udev = xroot->udev;
3319 		if (udev == NULL)
3320 			continue;	/* no USB device */
3321 		if (udev->bus == NULL)
3322 			continue;	/* no BUS structure */
3323 		if (udev->bus->methods == NULL)
3324 			continue;	/* no BUS methods */
3325 		if (udev->bus->methods->xfer_poll == NULL)
3326 			continue;	/* no poll method */
3327 
3328 		/* make sure that the BUS mutex is not locked */
3329 		drop_bus = 0;
3330 		while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3331 			mtx_unlock(&xroot->udev->bus->bus_mtx);
3332 			drop_bus++;
3333 		}
3334 
3335 		/* make sure that the transfer mutex is not locked */
3336 		drop_xfer = 0;
3337 		while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3338 			mtx_unlock(xroot->xfer_mtx);
3339 			drop_xfer++;
3340 		}
3341 
3342 		/* Make sure cv_signal() and cv_broadcast() is not called */
3343 		USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3344 		USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3345 		USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3346 		USB_BUS_NON_GIANT_ISOC_PROC(udev->bus)->up_msleep = 0;
3347 		USB_BUS_NON_GIANT_BULK_PROC(udev->bus)->up_msleep = 0;
3348 
3349 		/* poll USB hardware */
3350 		(udev->bus->methods->xfer_poll) (udev->bus);
3351 
3352 		USB_BUS_LOCK(xroot->bus);
3353 
3354 		/* check for clear stall */
3355 		if (udev->ctrl_xfer[1] != NULL) {
3356 
3357 			/* poll clear stall start */
3358 			pm = &udev->cs_msg[0].hdr;
3359 			(pm->pm_callback) (pm);
3360 			/* poll clear stall done thread */
3361 			pm = &udev->ctrl_xfer[1]->
3362 			    xroot->done_m[0].hdr;
3363 			(pm->pm_callback) (pm);
3364 		}
3365 
3366 		/* poll done thread */
3367 		pm = &xroot->done_m[0].hdr;
3368 		(pm->pm_callback) (pm);
3369 
3370 		USB_BUS_UNLOCK(xroot->bus);
3371 
3372 		/* restore transfer mutex */
3373 		while (drop_xfer--)
3374 			mtx_lock(xroot->xfer_mtx);
3375 
3376 		/* restore BUS mutex */
3377 		while (drop_bus--)
3378 			mtx_lock(&xroot->udev->bus->bus_mtx);
3379 	}
3380 }
3381 
3382 static void
3383 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3384     uint8_t type, enum usb_dev_speed speed)
3385 {
3386 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3387 		[USB_SPEED_LOW] = 8,
3388 		[USB_SPEED_FULL] = 64,
3389 		[USB_SPEED_HIGH] = 1024,
3390 		[USB_SPEED_VARIABLE] = 1024,
3391 		[USB_SPEED_SUPER] = 1024,
3392 	};
3393 
3394 	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3395 		[USB_SPEED_LOW] = 0,	/* invalid */
3396 		[USB_SPEED_FULL] = 1023,
3397 		[USB_SPEED_HIGH] = 1024,
3398 		[USB_SPEED_VARIABLE] = 3584,
3399 		[USB_SPEED_SUPER] = 1024,
3400 	};
3401 
3402 	static const uint16_t control_min[USB_SPEED_MAX] = {
3403 		[USB_SPEED_LOW] = 8,
3404 		[USB_SPEED_FULL] = 8,
3405 		[USB_SPEED_HIGH] = 64,
3406 		[USB_SPEED_VARIABLE] = 512,
3407 		[USB_SPEED_SUPER] = 512,
3408 	};
3409 
3410 	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3411 		[USB_SPEED_LOW] = 8,
3412 		[USB_SPEED_FULL] = 8,
3413 		[USB_SPEED_HIGH] = 512,
3414 		[USB_SPEED_VARIABLE] = 512,
3415 		[USB_SPEED_SUPER] = 1024,
3416 	};
3417 
3418 	uint16_t temp;
3419 
3420 	memset(ptr, 0, sizeof(*ptr));
3421 
3422 	switch (type) {
3423 	case UE_INTERRUPT:
3424 		ptr->range.max = intr_range_max[speed];
3425 		break;
3426 	case UE_ISOCHRONOUS:
3427 		ptr->range.max = isoc_range_max[speed];
3428 		break;
3429 	default:
3430 		if (type == UE_BULK)
3431 			temp = bulk_min[speed];
3432 		else /* UE_CONTROL */
3433 			temp = control_min[speed];
3434 
3435 		/* default is fixed */
3436 		ptr->fixed[0] = temp;
3437 		ptr->fixed[1] = temp;
3438 		ptr->fixed[2] = temp;
3439 		ptr->fixed[3] = temp;
3440 
3441 		if (speed == USB_SPEED_FULL) {
3442 			/* multiple sizes */
3443 			ptr->fixed[1] = 16;
3444 			ptr->fixed[2] = 32;
3445 			ptr->fixed[3] = 64;
3446 		}
3447 		if ((speed == USB_SPEED_VARIABLE) &&
3448 		    (type == UE_BULK)) {
3449 			/* multiple sizes */
3450 			ptr->fixed[2] = 1024;
3451 			ptr->fixed[3] = 1536;
3452 		}
3453 		break;
3454 	}
3455 }
3456 
3457 void	*
3458 usbd_xfer_softc(struct usb_xfer *xfer)
3459 {
3460 	return (xfer->priv_sc);
3461 }
3462 
3463 void *
3464 usbd_xfer_get_priv(struct usb_xfer *xfer)
3465 {
3466 	return (xfer->priv_fifo);
3467 }
3468 
3469 void
3470 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3471 {
3472 	xfer->priv_fifo = ptr;
3473 }
3474 
3475 uint8_t
3476 usbd_xfer_state(struct usb_xfer *xfer)
3477 {
3478 	return (xfer->usb_state);
3479 }
3480 
3481 void
3482 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3483 {
3484 	switch (flag) {
3485 		case USB_FORCE_SHORT_XFER:
3486 			xfer->flags.force_short_xfer = 1;
3487 			break;
3488 		case USB_SHORT_XFER_OK:
3489 			xfer->flags.short_xfer_ok = 1;
3490 			break;
3491 		case USB_MULTI_SHORT_OK:
3492 			xfer->flags.short_frames_ok = 1;
3493 			break;
3494 		case USB_MANUAL_STATUS:
3495 			xfer->flags.manual_status = 1;
3496 			break;
3497 	}
3498 }
3499 
3500 void
3501 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3502 {
3503 	switch (flag) {
3504 		case USB_FORCE_SHORT_XFER:
3505 			xfer->flags.force_short_xfer = 0;
3506 			break;
3507 		case USB_SHORT_XFER_OK:
3508 			xfer->flags.short_xfer_ok = 0;
3509 			break;
3510 		case USB_MULTI_SHORT_OK:
3511 			xfer->flags.short_frames_ok = 0;
3512 			break;
3513 		case USB_MANUAL_STATUS:
3514 			xfer->flags.manual_status = 0;
3515 			break;
3516 	}
3517 }
3518 
3519 /*
3520  * The following function returns in milliseconds when the isochronous
3521  * transfer was completed by the hardware. The returned value wraps
3522  * around 65536 milliseconds.
3523  */
3524 uint16_t
3525 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3526 {
3527 	return (xfer->isoc_time_complete);
3528 }
3529 
3530 /*
3531  * The following function returns non-zero if the max packet size
3532  * field was clamped to a valid value. Else it returns zero.
3533  */
3534 uint8_t
3535 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3536 {
3537 	return (xfer->flags_int.maxp_was_clamped);
3538 }
3539