xref: /freebsd/sys/dev/usb/usb_transfer.c (revision 21a4258d89a4e27632cfd87e5ad6e8538a6e77a2)
1 /* $FreeBSD$ */
2 /*-
3  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #ifdef USB_GLOBAL_INCLUDE_FILE
28 #include USB_GLOBAL_INCLUDE_FILE
29 #else
30 #include <sys/stdint.h>
31 #include <sys/stddef.h>
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
43 #include <sys/sx.h>
44 #include <sys/unistd.h>
45 #include <sys/callout.h>
46 #include <sys/malloc.h>
47 #include <sys/priv.h>
48 
49 #include <dev/usb/usb.h>
50 #include <dev/usb/usbdi.h>
51 #include <dev/usb/usbdi_util.h>
52 
53 #define	USB_DEBUG_VAR usb_debug
54 
55 #include <dev/usb/usb_core.h>
56 #include <dev/usb/usb_busdma.h>
57 #include <dev/usb/usb_process.h>
58 #include <dev/usb/usb_transfer.h>
59 #include <dev/usb/usb_device.h>
60 #include <dev/usb/usb_debug.h>
61 #include <dev/usb/usb_util.h>
62 
63 #include <dev/usb/usb_controller.h>
64 #include <dev/usb/usb_bus.h>
65 #include <dev/usb/usb_pf.h>
66 #endif			/* USB_GLOBAL_INCLUDE_FILE */
67 
68 struct usb_std_packet_size {
69 	struct {
70 		uint16_t min;		/* inclusive */
71 		uint16_t max;		/* inclusive */
72 	}	range;
73 
74 	uint16_t fixed[4];
75 };
76 
77 static usb_callback_t usb_request_callback;
78 
79 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
80 
81 	/* This transfer is used for generic control endpoint transfers */
82 
83 	[0] = {
84 		.type = UE_CONTROL,
85 		.endpoint = 0x00,	/* Control endpoint */
86 		.direction = UE_DIR_ANY,
87 		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
88 		.flags = {.proxy_buffer = 1,},
89 		.callback = &usb_request_callback,
90 		.usb_mode = USB_MODE_DUAL,	/* both modes */
91 	},
92 
93 	/* This transfer is used for generic clear stall only */
94 
95 	[1] = {
96 		.type = UE_CONTROL,
97 		.endpoint = 0x00,	/* Control pipe */
98 		.direction = UE_DIR_ANY,
99 		.bufsize = sizeof(struct usb_device_request),
100 		.callback = &usb_do_clear_stall_callback,
101 		.timeout = 1000,	/* 1 second */
102 		.interval = 50,	/* 50ms */
103 		.usb_mode = USB_MODE_HOST,
104 	},
105 };
106 
107 /* function prototypes */
108 
109 static void	usbd_update_max_frame_size(struct usb_xfer *);
110 static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
111 static void	usbd_control_transfer_init(struct usb_xfer *);
112 static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
113 static void	usb_callback_proc(struct usb_proc_msg *);
114 static void	usbd_callback_ss_done_defer(struct usb_xfer *);
115 static void	usbd_callback_wrapper(struct usb_xfer_queue *);
116 static void	usbd_transfer_start_cb(void *);
117 static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
118 static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
119 		    uint8_t type, enum usb_dev_speed speed);
120 
121 /*------------------------------------------------------------------------*
122  *	usb_request_callback
123  *------------------------------------------------------------------------*/
124 static void
125 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
126 {
127 	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
128 		usb_handle_request_callback(xfer, error);
129 	else
130 		usbd_do_request_callback(xfer, error);
131 }
132 
133 /*------------------------------------------------------------------------*
134  *	usbd_update_max_frame_size
135  *
136  * This function updates the maximum frame size, since high speed USB
137  * can transfer multiple consecutive packets per (micro)frame.
138  *------------------------------------------------------------------------*/
139 static void
140 usbd_update_max_frame_size(struct usb_xfer *xfer)
141 {
142 	/* compute maximum frame size */
143 	/* this computation should not overflow 16-bit */
144 	/* max = 15 * 1024 */
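	/*
	 * Worked example, using values as computed by
	 * usbd_transfer_setup_sub() below: a high speed isochronous or
	 * interrupt endpoint with wMaxPacketSize = 0x1400 yields
	 * max_packet_size = 0x400 (1024) and max_packet_count = 3, so
	 * max_frame_size becomes 3 * 1024 = 3072 bytes.
	 */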
145 
146 	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
147 }
148 
149 /*------------------------------------------------------------------------*
150  *	usbd_get_dma_delay
151  *
152  * The following function is called when we need to
153  * synchronize with DMA hardware.
154  *
155  * Returns:
156  *    0: no DMA delay required
157  * Else: milliseconds of DMA delay
158  *------------------------------------------------------------------------*/
159 usb_timeout_t
160 usbd_get_dma_delay(struct usb_device *udev)
161 {
162 	const struct usb_bus_methods *mtod;
163 	uint32_t temp;
164 
165 	mtod = udev->bus->methods;
166 	temp = 0;
167 
168 	if (mtod->get_dma_delay) {
169 		(mtod->get_dma_delay) (udev, &temp);
170 		/*
171 		 * Round up and convert to milliseconds. Note that we use
172 		 * 1024 milliseconds per second to save a division.
173 		 */
174 		temp += 0x3FF;
175 		temp /= 0x400;
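		/*
		 * Example: a reported delay of 2500 rounds up to
		 * (2500 + 0x3FF) / 0x400 = 3 milliseconds.
		 */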
176 	}
177 	return (temp);
178 }
179 
180 /*------------------------------------------------------------------------*
181  *	usbd_transfer_setup_sub_malloc
182  *
183  * This function will allocate one or more DMA'able memory chunks
184  * according to "size", "align" and "count" arguments. "ppc" will
185  * point to a linear array of USB page caches afterwards.
186  *
187  * If the "align" argument is equal to "1" a non-contiguous allocation
188  * can happen. Else if the "align" argument is greater than "1", the
189  * allocation will always be contiguous in memory.
190  *
191  * Returns:
192  *    0: Success
193  * Else: Failure
194  *------------------------------------------------------------------------*/
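/*
 * Hypothetical usage (all "foo" names are placeholders): a controller
 * driver's "xfer_setup" method could allocate its transfer descriptors
 * like this:
 *
 *	usbd_transfer_setup_sub_malloc(parm, &pc,
 *	    sizeof(struct foo_td), FOO_TD_ALIGN, ntd);
 *
 * On the first pass (parm->buf == NULL) this only reserves space; on the
 * second pass it allocates "ntd" objects of the given size and alignment
 * and leaves "pc" pointing at the corresponding page cache array.
 */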
195 #if USB_HAVE_BUSDMA
196 uint8_t
197 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
198     struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
199     usb_size_t count)
200 {
201 	struct usb_page_cache *pc;
202 	struct usb_page *pg;
203 	void *buf;
204 	usb_size_t n_dma_pc;
205 	usb_size_t n_dma_pg;
206 	usb_size_t n_obj;
207 	usb_size_t x;
208 	usb_size_t y;
209 	usb_size_t r;
210 	usb_size_t z;
211 
212 	USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
213 	    align));
214 	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
215 
216 	if (count == 0) {
217 		return (0);		/* nothing to allocate */
218 	}
219 	/*
220 	 * Make sure that the size is aligned properly.
221 	 */
222 	size = -((-size) & (-align));
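	/*
	 * For a power-of-two "align" this rounds "size" up to the next
	 * multiple of "align"; e.g. size = 100, align = 64 gives
	 * -((-100) & (-64)) = 128.
	 */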
223 
224 	/*
225 	 * Try multi-allocation chunks to reduce the number of DMA
226 	 * allocations, since DMA allocations are slow.
227 	 */
228 	if (align == 1) {
229 		/* special case - non-cached multi page DMA memory */
230 		n_dma_pc = count;
231 		n_dma_pg = (2 + (size / USB_PAGE_SIZE));
232 		n_obj = 1;
233 	} else if (size >= USB_PAGE_SIZE) {
234 		n_dma_pc = count;
235 		n_dma_pg = 1;
236 		n_obj = 1;
237 	} else {
238 		/* compute number of objects per page */
239 #ifdef USB_DMA_SINGLE_ALLOC
240 		n_obj = 1;
241 #else
242 		n_obj = (USB_PAGE_SIZE / size);
243 #endif
244 		/*
245 		 * Compute number of DMA chunks, rounded up
246 		 * to nearest one:
247 		 */
248 		n_dma_pc = howmany(count, n_obj);
249 		n_dma_pg = 1;
250 	}
251 
252 	/*
253 	 * DMA memory is allocated once, but mapped twice. That's why
254 	 * there is one list for auto-free and another list for
255 	 * non-auto-free which only holds the mapping and not the
256 	 * allocation.
257 	 */
258 	if (parm->buf == NULL) {
259 		/* reserve memory (auto-free) */
260 		parm->dma_page_ptr += n_dma_pc * n_dma_pg;
261 		parm->dma_page_cache_ptr += n_dma_pc;
262 
263 		/* reserve memory (no-auto-free) */
264 		parm->dma_page_ptr += count * n_dma_pg;
265 		parm->xfer_page_cache_ptr += count;
266 		return (0);
267 	}
268 	for (x = 0; x != n_dma_pc; x++) {
269 		/* need to initialize the page cache */
270 		parm->dma_page_cache_ptr[x].tag_parent =
271 		    &parm->curr_xfer->xroot->dma_parent_tag;
272 	}
273 	for (x = 0; x != count; x++) {
274 		/* need to initialize the page cache */
275 		parm->xfer_page_cache_ptr[x].tag_parent =
276 		    &parm->curr_xfer->xroot->dma_parent_tag;
277 	}
278 
279 	if (ppc != NULL) {
280 		if (n_obj != 1)
281 			*ppc = parm->xfer_page_cache_ptr;
282 		else
283 			*ppc = parm->dma_page_cache_ptr;
284 	}
285 	r = count;			/* set remainder count */
286 	z = n_obj * size;		/* set allocation size */
287 	pc = parm->xfer_page_cache_ptr;
288 	pg = parm->dma_page_ptr;
289 
290 	if (n_obj == 1) {
291 	    /*
292 	     * Avoid mapping memory twice if only a single object
293 	     * should be allocated per page cache:
294 	     */
295 	    for (x = 0; x != n_dma_pc; x++) {
296 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
297 		    pg, z, align)) {
298 			return (1);	/* failure */
299 		}
300 		/* Make room for one DMA page cache and "n_dma_pg" pages */
301 		parm->dma_page_cache_ptr++;
302 		pg += n_dma_pg;
303 	    }
304 	} else {
305 	    for (x = 0; x != n_dma_pc; x++) {
306 
307 		if (r < n_obj) {
308 			/* compute last remainder */
309 			z = r * size;
310 			n_obj = r;
311 		}
312 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
313 		    pg, z, align)) {
314 			return (1);	/* failure */
315 		}
316 		/* Set beginning of current buffer */
317 		buf = parm->dma_page_cache_ptr->buffer;
318 		/* Make room for one DMA page cache and "n_dma_pg" pages */
319 		parm->dma_page_cache_ptr++;
320 		pg += n_dma_pg;
321 
322 		for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
323 
324 			/* Load sub-chunk into DMA */
325 			if (usb_pc_dmamap_create(pc, size)) {
326 				return (1);	/* failure */
327 			}
328 			pc->buffer = USB_ADD_BYTES(buf, y * size);
329 			pc->page_start = pg;
330 
331 			USB_MTX_LOCK(pc->tag_parent->mtx);
332 			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
333 				USB_MTX_UNLOCK(pc->tag_parent->mtx);
334 				return (1);	/* failure */
335 			}
336 			USB_MTX_UNLOCK(pc->tag_parent->mtx);
337 		}
338 	    }
339 	}
340 
341 	parm->xfer_page_cache_ptr = pc;
342 	parm->dma_page_ptr = pg;
343 	return (0);
344 }
345 #endif
346 
347 /*------------------------------------------------------------------------*
348  *	usbd_transfer_setup_sub - transfer setup subroutine
349  *
350  * This function must be called from the "xfer_setup" callback of the
351  * USB Host or Device controller driver when setting up a USB
352  * transfer. This function will set up the correct packet sizes, buffer
353  * sizes, flags and more that are stored in the "usb_xfer"
354  * structure.
355  *------------------------------------------------------------------------*/
356 void
357 usbd_transfer_setup_sub(struct usb_setup_params *parm)
358 {
359 	enum {
360 		REQ_SIZE = 8,
361 		MIN_PKT = 8,
362 	};
363 	struct usb_xfer *xfer = parm->curr_xfer;
364 	const struct usb_config *setup = parm->curr_setup;
365 	struct usb_endpoint_ss_comp_descriptor *ecomp;
366 	struct usb_endpoint_descriptor *edesc;
367 	struct usb_std_packet_size std_size;
368 	usb_frcount_t n_frlengths;
369 	usb_frcount_t n_frbuffers;
370 	usb_frcount_t x;
371 	uint16_t maxp_old;
372 	uint8_t type;
373 	uint8_t zmps;
374 
375 	/*
376 	 * Sanity check. The following parameters must be initialized before
377 	 * calling this function.
378 	 */
379 	if ((parm->hc_max_packet_size == 0) ||
380 	    (parm->hc_max_packet_count == 0) ||
381 	    (parm->hc_max_frame_size == 0)) {
382 		parm->err = USB_ERR_INVAL;
383 		goto done;
384 	}
385 	edesc = xfer->endpoint->edesc;
386 	ecomp = xfer->endpoint->ecomp;
387 
388 	type = (edesc->bmAttributes & UE_XFERTYPE);
389 
390 	xfer->flags = setup->flags;
391 	xfer->nframes = setup->frames;
392 	xfer->timeout = setup->timeout;
393 	xfer->callback = setup->callback;
394 	xfer->interval = setup->interval;
395 	xfer->endpointno = edesc->bEndpointAddress;
396 	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
397 	xfer->max_packet_count = 1;
398 	/* make a shadow copy: */
399 	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
400 
401 	parm->bufsize = setup->bufsize;
402 
403 	switch (parm->speed) {
404 	case USB_SPEED_HIGH:
405 		switch (type) {
406 		case UE_ISOCHRONOUS:
407 		case UE_INTERRUPT:
408 			xfer->max_packet_count +=
409 			    (xfer->max_packet_size >> 11) & 3;
410 
411 			/* check for invalid max packet count */
412 			if (xfer->max_packet_count > 3)
413 				xfer->max_packet_count = 3;
414 			break;
415 		default:
416 			break;
417 		}
418 		xfer->max_packet_size &= 0x7FF;
419 		break;
420 	case USB_SPEED_SUPER:
421 		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
422 
423 		if (ecomp != NULL)
424 			xfer->max_packet_count += ecomp->bMaxBurst;
425 
426 		if ((xfer->max_packet_count == 0) ||
427 		    (xfer->max_packet_count > 16))
428 			xfer->max_packet_count = 16;
429 
430 		switch (type) {
431 		case UE_CONTROL:
432 			xfer->max_packet_count = 1;
433 			break;
434 		case UE_ISOCHRONOUS:
435 			if (ecomp != NULL) {
436 				uint8_t mult;
437 
438 				mult = UE_GET_SS_ISO_MULT(
439 				    ecomp->bmAttributes) + 1;
440 				if (mult > 3)
441 					mult = 3;
442 
443 				xfer->max_packet_count *= mult;
444 			}
445 			break;
446 		default:
447 			break;
448 		}
449 		xfer->max_packet_size &= 0x7FF;
450 		break;
451 	default:
452 		break;
453 	}
454 	/* range check "max_packet_count" */
455 
456 	if (xfer->max_packet_count > parm->hc_max_packet_count) {
457 		xfer->max_packet_count = parm->hc_max_packet_count;
458 	}
459 
460 	/* store max packet size value before filtering */
461 
462 	maxp_old = xfer->max_packet_size;
463 
464 	/* filter "wMaxPacketSize" according to HC capabilities */
465 
466 	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
467 	    (xfer->max_packet_size == 0)) {
468 		xfer->max_packet_size = parm->hc_max_packet_size;
469 	}
470 	/* filter "wMaxPacketSize" according to standard sizes */
471 
472 	usbd_get_std_packet_size(&std_size, type, parm->speed);
473 
474 	if (std_size.range.min || std_size.range.max) {
475 
476 		if (xfer->max_packet_size < std_size.range.min) {
477 			xfer->max_packet_size = std_size.range.min;
478 		}
479 		if (xfer->max_packet_size > std_size.range.max) {
480 			xfer->max_packet_size = std_size.range.max;
481 		}
482 	} else {
483 
484 		if (xfer->max_packet_size >= std_size.fixed[3]) {
485 			xfer->max_packet_size = std_size.fixed[3];
486 		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
487 			xfer->max_packet_size = std_size.fixed[2];
488 		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
489 			xfer->max_packet_size = std_size.fixed[1];
490 		} else {
491 			/* only one possibility left */
492 			xfer->max_packet_size = std_size.fixed[0];
493 		}
494 	}
495 
496 	/*
497 	 * Check if the max packet size was outside its allowed range
498 	 * and clamped to a valid value:
499 	 */
500 	if (maxp_old != xfer->max_packet_size)
501 		xfer->flags_int.maxp_was_clamped = 1;
502 
503 	/* compute "max_frame_size" */
504 
505 	usbd_update_max_frame_size(xfer);
506 
507 	/* check interrupt interval and transfer pre-delay */
508 
509 	if (type == UE_ISOCHRONOUS) {
510 
511 		uint16_t frame_limit;
512 
513 		xfer->interval = 0;	/* not used, must be zero */
514 		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
515 
516 		if (xfer->timeout == 0) {
517 			/*
518 			 * set a default timeout in
519 			 * case something goes wrong!
520 			 */
521 			xfer->timeout = 1000 / 4;
522 		}
523 		switch (parm->speed) {
524 		case USB_SPEED_LOW:
525 		case USB_SPEED_FULL:
526 			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
527 			xfer->fps_shift = 0;
528 			break;
529 		default:
530 			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
531 			xfer->fps_shift = edesc->bInterval;
532 			if (xfer->fps_shift > 0)
533 				xfer->fps_shift--;
534 			if (xfer->fps_shift > 3)
535 				xfer->fps_shift = 3;
536 			if (xfer->flags.pre_scale_frames != 0)
537 				xfer->nframes <<= (3 - xfer->fps_shift);
538 			break;
539 		}
540 
541 		if (xfer->nframes > frame_limit) {
542 			/*
543 			 * this is not going to work
544 			 * across different hardware
545 			 */
546 			parm->err = USB_ERR_INVAL;
547 			goto done;
548 		}
549 		if (xfer->nframes == 0) {
550 			/*
551 			 * this is not a valid value
552 			 */
553 			parm->err = USB_ERR_ZERO_NFRAMES;
554 			goto done;
555 		}
556 	} else {
557 
558 		/*
559 		 * If a value is specified, use it; otherwise check the
560 		 * endpoint descriptor!
561 		 */
562 		if (type == UE_INTERRUPT) {
563 
564 			uint32_t temp;
565 
566 			if (xfer->interval == 0) {
567 
568 				xfer->interval = edesc->bInterval;
569 
570 				switch (parm->speed) {
571 				case USB_SPEED_LOW:
572 				case USB_SPEED_FULL:
573 					break;
574 				default:
575 					/* 125us -> 1ms */
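					/* e.g. bInterval = 7 gives 1 << 3 = 8 ms */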
576 					if (xfer->interval < 4)
577 						xfer->interval = 1;
578 					else if (xfer->interval > 16)
579 						xfer->interval = (1 << (16 - 4));
580 					else
581 						xfer->interval =
582 						    (1 << (xfer->interval - 4));
583 					break;
584 				}
585 			}
586 
587 			if (xfer->interval == 0) {
588 				/*
589 				 * One millisecond is the smallest
590 				 * interval we support:
591 				 */
592 				xfer->interval = 1;
593 			}
594 
595 			xfer->fps_shift = 0;
596 			temp = 1;
597 
598 			while ((temp != 0) && (temp < xfer->interval)) {
599 				xfer->fps_shift++;
600 				temp *= 2;
601 			}
602 
603 			switch (parm->speed) {
604 			case USB_SPEED_LOW:
605 			case USB_SPEED_FULL:
606 				break;
607 			default:
608 				xfer->fps_shift += 3;
609 				break;
610 			}
611 		}
612 	}
613 
614 	/*
615 	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
616 	 * to be equal to zero when setting up USB transfers, since
617 	 * that would lead to a lot of extra code in the USB kernel.
618 	 */
619 
620 	if ((xfer->max_frame_size == 0) ||
621 	    (xfer->max_packet_size == 0)) {
622 
623 		zmps = 1;
624 
625 		if ((parm->bufsize <= MIN_PKT) &&
626 		    (type != UE_CONTROL) &&
627 		    (type != UE_BULK)) {
628 
629 			/* workaround */
630 			xfer->max_packet_size = MIN_PKT;
631 			xfer->max_packet_count = 1;
632 			parm->bufsize = 0;	/* automatic setup length */
633 			usbd_update_max_frame_size(xfer);
634 
635 		} else {
636 			parm->err = USB_ERR_ZERO_MAXP;
637 			goto done;
638 		}
639 
640 	} else {
641 		zmps = 0;
642 	}
643 
644 	/*
645 	 * check if we should setup a default
646 	 * length:
647 	 */
648 
649 	if (parm->bufsize == 0) {
650 
651 		parm->bufsize = xfer->max_frame_size;
652 
653 		if (type == UE_ISOCHRONOUS) {
654 			parm->bufsize *= xfer->nframes;
655 		}
656 	}
657 	/*
658 	 * check if we are about to setup a proxy
659 	 * type of buffer:
660 	 */
661 
662 	if (xfer->flags.proxy_buffer) {
663 
664 		/* round bufsize up */
665 
666 		parm->bufsize += (xfer->max_frame_size - 1);
667 
668 		if (parm->bufsize < xfer->max_frame_size) {
669 			/* length wrapped around */
670 			parm->err = USB_ERR_INVAL;
671 			goto done;
672 		}
673 		/* subtract remainder */
674 
675 		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
676 
677 		/* add length of USB device request structure, if any */
678 
679 		if (type == UE_CONTROL) {
680 			parm->bufsize += REQ_SIZE;	/* SETUP message */
681 		}
682 	}
683 	xfer->max_data_length = parm->bufsize;
684 
685 	/* Setup "n_frlengths" and "n_frbuffers" */
686 
687 	if (type == UE_ISOCHRONOUS) {
688 		n_frlengths = xfer->nframes;
689 		n_frbuffers = 1;
690 	} else {
691 
692 		if (type == UE_CONTROL) {
693 			xfer->flags_int.control_xfr = 1;
694 			if (xfer->nframes == 0) {
695 				if (parm->bufsize <= REQ_SIZE) {
696 					/*
697 					 * there will never be any data
698 					 * stage
699 					 */
700 					xfer->nframes = 1;
701 				} else {
702 					xfer->nframes = 2;
703 				}
704 			}
705 		} else {
706 			if (xfer->nframes == 0) {
707 				xfer->nframes = 1;
708 			}
709 		}
710 
711 		n_frlengths = xfer->nframes;
712 		n_frbuffers = xfer->nframes;
713 	}
714 
715 	/*
716 	 * check if we have room for the
717 	 * USB device request structure:
718 	 */
719 
720 	if (type == UE_CONTROL) {
721 
722 		if (xfer->max_data_length < REQ_SIZE) {
723 			/* length wrapped around or too small bufsize */
724 			parm->err = USB_ERR_INVAL;
725 			goto done;
726 		}
727 		xfer->max_data_length -= REQ_SIZE;
728 	}
729 	/*
730 	 * Setup "frlengths" and shadow "frlengths" for keeping the
731 	 * initial frame lengths when a USB transfer is complete. This
732 	 * information is useful when computing isochronous offsets.
733 	 */
734 	xfer->frlengths = parm->xfer_length_ptr;
735 	parm->xfer_length_ptr += 2 * n_frlengths;
736 
737 	/* setup "frbuffers" */
738 	xfer->frbuffers = parm->xfer_page_cache_ptr;
739 	parm->xfer_page_cache_ptr += n_frbuffers;
740 
741 	/* initialize max frame count */
742 	xfer->max_frame_count = xfer->nframes;
743 
744 	/*
745 	 * check if we need to setup
746 	 * a local buffer:
747 	 */
748 
749 	if (!xfer->flags.ext_buffer) {
750 #if USB_HAVE_BUSDMA
751 		struct usb_page_search page_info;
752 		struct usb_page_cache *pc;
753 
754 		if (usbd_transfer_setup_sub_malloc(parm,
755 		    &pc, parm->bufsize, 1, 1)) {
756 			parm->err = USB_ERR_NOMEM;
757 		} else if (parm->buf != NULL) {
758 
759 			usbd_get_page(pc, 0, &page_info);
760 
761 			xfer->local_buffer = page_info.buffer;
762 
763 			usbd_xfer_set_frame_offset(xfer, 0, 0);
764 
765 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
766 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
767 			}
768 		}
769 #else
770 		/* align data */
771 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
772 
773 		if (parm->buf != NULL) {
774 			xfer->local_buffer =
775 			    USB_ADD_BYTES(parm->buf, parm->size[0]);
776 
777 			usbd_xfer_set_frame_offset(xfer, 0, 0);
778 
779 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
780 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
781 			}
782 		}
783 		parm->size[0] += parm->bufsize;
784 
785 		/* align data again */
786 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
787 #endif
788 	}
789 	/*
790 	 * Compute maximum buffer size
791 	 */
792 
793 	if (parm->bufsize_max < parm->bufsize) {
794 		parm->bufsize_max = parm->bufsize;
795 	}
796 #if USB_HAVE_BUSDMA
797 	if (xfer->flags_int.bdma_enable) {
798 		/*
799 		 * Setup "dma_page_ptr".
800 		 *
801 		 * Proof for formula below:
802 		 *
803 		 * Assume there are three USB frames having length "a", "b" and
804 		 * "c". These USB frames will at maximum need "z"
805 		 * "usb_page" structures. "z" is given by:
806 		 *
807 		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
808 		 * ((c / USB_PAGE_SIZE) + 2);
809 		 *
810 		 * Constraining "a", "b" and "c" like this:
811 		 *
812 		 * (a + b + c) <= parm->bufsize
813 		 *
814 		 * We know that:
815 		 *
816 		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
817 		 *
818 		 * Here is the general formula:
819 		 */
820 		xfer->dma_page_ptr = parm->dma_page_ptr;
821 		parm->dma_page_ptr += (2 * n_frbuffers);
822 		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
823 	}
824 #endif
825 	if (zmps) {
826 		/* correct maximum data length */
827 		xfer->max_data_length = 0;
828 	}
829 	/* subtract USB frame remainder from "hc_max_frame_size" */
830 
831 	xfer->max_hc_frame_size =
832 	    (parm->hc_max_frame_size -
833 	    (parm->hc_max_frame_size % xfer->max_frame_size));
834 
835 	if (xfer->max_hc_frame_size == 0) {
836 		parm->err = USB_ERR_INVAL;
837 		goto done;
838 	}
839 
840 	/* initialize frame buffers */
841 
842 	if (parm->buf) {
843 		for (x = 0; x != n_frbuffers; x++) {
844 			xfer->frbuffers[x].tag_parent =
845 			    &xfer->xroot->dma_parent_tag;
846 #if USB_HAVE_BUSDMA
847 			if (xfer->flags_int.bdma_enable &&
848 			    (parm->bufsize_max > 0)) {
849 
850 				if (usb_pc_dmamap_create(
851 				    xfer->frbuffers + x,
852 				    parm->bufsize_max)) {
853 					parm->err = USB_ERR_NOMEM;
854 					goto done;
855 				}
856 			}
857 #endif
858 		}
859 	}
860 done:
861 	if (parm->err) {
862 		/*
863 		 * Set some dummy values so that we avoid division by zero:
864 		 */
865 		xfer->max_hc_frame_size = 1;
866 		xfer->max_frame_size = 1;
867 		xfer->max_packet_size = 1;
868 		xfer->max_data_length = 0;
869 		xfer->nframes = 0;
870 		xfer->max_frame_count = 0;
871 	}
872 }
873 
874 static uint8_t
875 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
876     uint16_t n_setup)
877 {
878 	while (n_setup--) {
879 		uint8_t type = setup_start[n_setup].type;
880 		if (type == UE_BULK || type == UE_BULK_INTR ||
881 		    type == UE_TYPE_ANY)
882 			return (1);
883 	}
884 	return (0);
885 }
886 
887 /*------------------------------------------------------------------------*
888  *	usbd_transfer_setup - setup an array of USB transfers
889  *
890  * NOTE: You must always call "usbd_transfer_unsetup" after calling
891  * "usbd_transfer_setup" if success was returned.
892  *
893  * The idea is that the USB device driver should pre-allocate all its
894  * transfers by one call to this function.
895  *
896  * Return values:
897  *    0: Success
898  * Else: Failure
899  *------------------------------------------------------------------------*/
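/*
 * Sketch of a typical caller (hypothetical driver code, not part of this
 * file). A driver usually pre-allocates all of its transfers once, from
 * its attach routine, using its own "usb_config" table, softc and mutex:
 *
 *	static const struct usb_config my_config[MY_N_TRANSFER] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.short_xfer_ok = 1,},
 *			.callback = &my_read_callback,
 *		},
 *	};
 *
 *	error = usbd_transfer_setup(uaa->device, &iface_index,
 *	    sc->sc_xfer, my_config, MY_N_TRANSFER, sc, &sc->sc_mtx);
 *
 * The names "my_config", "MY_N_TRANSFER", "my_read_callback", "sc" and
 * "iface_index" are placeholders, not part of this API.
 */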
900 usb_error_t
901 usbd_transfer_setup(struct usb_device *udev,
902     const uint8_t *ifaces, struct usb_xfer **ppxfer,
903     const struct usb_config *setup_start, uint16_t n_setup,
904     void *priv_sc, struct mtx *xfer_mtx)
905 {
906 	const struct usb_config *setup_end = setup_start + n_setup;
907 	const struct usb_config *setup;
908 	struct usb_setup_params *parm;
909 	struct usb_endpoint *ep;
910 	struct usb_xfer_root *info;
911 	struct usb_xfer *xfer;
912 	void *buf = NULL;
913 	usb_error_t error = 0;
914 	uint16_t n;
915 	uint16_t refcount;
916 	uint8_t do_unlock;
917 
918 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
919 	    "usbd_transfer_setup can sleep!");
920 
921 	/* do some checking first */
922 
923 	if (n_setup == 0) {
924 		DPRINTFN(6, "setup array has zero length!\n");
925 		return (USB_ERR_INVAL);
926 	}
927 	if (ifaces == NULL) {
928 		DPRINTFN(6, "ifaces array is NULL!\n");
929 		return (USB_ERR_INVAL);
930 	}
931 	if (xfer_mtx == NULL) {
932 		DPRINTFN(6, "using global lock\n");
933 		xfer_mtx = &Giant;
934 	}
935 
936 	/* more sanity checks */
937 
938 	for (setup = setup_start, n = 0;
939 	    setup != setup_end; setup++, n++) {
940 		if (setup->bufsize == (usb_frlength_t)-1) {
941 			error = USB_ERR_BAD_BUFSIZE;
942 			DPRINTF("invalid bufsize\n");
943 		}
944 		if (setup->callback == NULL) {
945 			error = USB_ERR_NO_CALLBACK;
946 			DPRINTF("no callback\n");
947 		}
948 		ppxfer[n] = NULL;
949 	}
950 
951 	if (error)
952 		return (error);
953 
954 	/* Protect scratch area */
955 	do_unlock = usbd_ctrl_lock(udev);
956 
957 	refcount = 0;
958 	info = NULL;
959 
960 	parm = &udev->scratch.xfer_setup[0].parm;
961 	memset(parm, 0, sizeof(*parm));
962 
963 	parm->udev = udev;
964 	parm->speed = usbd_get_speed(udev);
965 	parm->hc_max_packet_count = 1;
966 
967 	if (parm->speed >= USB_SPEED_MAX) {
968 		parm->err = USB_ERR_INVAL;
969 		goto done;
970 	}
971 	/* setup all transfers */
972 
973 	while (1) {
974 
975 		if (buf) {
976 			/*
977 			 * Initialize the "usb_xfer_root" structure,
978 			 * which is common for all our USB transfers.
979 			 */
980 			info = USB_ADD_BYTES(buf, 0);
981 
982 			info->memory_base = buf;
983 			info->memory_size = parm->size[0];
984 
985 #if USB_HAVE_BUSDMA
986 			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
987 			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
988 #endif
989 			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
990 			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
991 
992 			cv_init(&info->cv_drain, "WDRAIN");
993 
994 			info->xfer_mtx = xfer_mtx;
995 #if USB_HAVE_BUSDMA
996 			usb_dma_tag_setup(&info->dma_parent_tag,
997 			    parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
998 			    xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
999 			    parm->dma_tag_max);
1000 #endif
1001 
1002 			info->bus = udev->bus;
1003 			info->udev = udev;
1004 
1005 			TAILQ_INIT(&info->done_q.head);
1006 			info->done_q.command = &usbd_callback_wrapper;
1007 #if USB_HAVE_BUSDMA
1008 			TAILQ_INIT(&info->dma_q.head);
1009 			info->dma_q.command = &usb_bdma_work_loop;
1010 #endif
1011 			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1012 			info->done_m[0].xroot = info;
1013 			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1014 			info->done_m[1].xroot = info;
1015 
1016 			/*
1017 			 * In device side mode control endpoint
1018 			 * requests need to run from a separate
1019 			 * context, else there is a chance of
1020 			 * deadlock!
1021 			 */
1022 			if (setup_start == usb_control_ep_cfg)
1023 				info->done_p =
1024 				    USB_BUS_CONTROL_XFER_PROC(udev->bus);
1025 			else if (xfer_mtx == &Giant)
1026 				info->done_p =
1027 				    USB_BUS_GIANT_PROC(udev->bus);
1028 			else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1029 				info->done_p =
1030 				    USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1031 			else
1032 				info->done_p =
1033 				    USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1034 		}
1035 		/* reset sizes */
1036 
1037 		parm->size[0] = 0;
1038 		parm->buf = buf;
1039 		parm->size[0] += sizeof(info[0]);
1040 
1041 		for (setup = setup_start, n = 0;
1042 		    setup != setup_end; setup++, n++) {
1043 
1044 			/* skip USB transfers without callbacks: */
1045 			if (setup->callback == NULL) {
1046 				continue;
1047 			}
1048 			/* see if there is a matching endpoint */
1049 			ep = usbd_get_endpoint(udev,
1050 			    ifaces[setup->if_index], setup);
1051 
1052 			/*
1053 			 * Check that the USB PIPE is valid and that
1054 			 * the endpoint mode is proper.
1055 			 *
1056 			 * Make sure we don't allocate a streams
1057 			 * transfer when such a combination is not
1058 			 * valid.
1059 			 */
1060 			if ((ep == NULL) || (ep->methods == NULL) ||
1061 			    ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1062 			    (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1063 			    (setup->stream_id != 0 &&
1064 			    (setup->stream_id >= USB_MAX_EP_STREAMS ||
1065 			    (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1066 				if (setup->flags.no_pipe_ok)
1067 					continue;
1068 				if ((setup->usb_mode != USB_MODE_DUAL) &&
1069 				    (setup->usb_mode != udev->flags.usb_mode))
1070 					continue;
1071 				parm->err = USB_ERR_NO_PIPE;
1072 				goto done;
1073 			}
1074 
1075 			/* align data properly */
1076 			parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1077 
1078 			/* store current setup pointer */
1079 			parm->curr_setup = setup;
1080 
1081 			if (buf) {
1082 				/*
1083 				 * Common initialization of the
1084 				 * "usb_xfer" structure.
1085 				 */
1086 				xfer = USB_ADD_BYTES(buf, parm->size[0]);
1087 				xfer->address = udev->address;
1088 				xfer->priv_sc = priv_sc;
1089 				xfer->xroot = info;
1090 
1091 				usb_callout_init_mtx(&xfer->timeout_handle,
1092 				    &udev->bus->bus_mtx, 0);
1093 			} else {
1094 				/*
1095 				 * Set up a dummy xfer, since we are
1096 				 * writing to the "usb_xfer"
1097 				 * structure pointed to by "xfer"
1098 				 * before we have allocated any
1099 				 * memory:
1100 				 */
1101 				xfer = &udev->scratch.xfer_setup[0].dummy;
1102 				memset(xfer, 0, sizeof(*xfer));
1103 				refcount++;
1104 			}
1105 
1106 			/* set transfer endpoint pointer */
1107 			xfer->endpoint = ep;
1108 
1109 			/* set transfer stream ID */
1110 			xfer->stream_id = setup->stream_id;
1111 
1112 			parm->size[0] += sizeof(xfer[0]);
1113 			parm->methods = xfer->endpoint->methods;
1114 			parm->curr_xfer = xfer;
1115 
1116 			/*
1117 			 * Call the Host or Device controller transfer
1118 			 * setup routine:
1119 			 */
1120 			(udev->bus->methods->xfer_setup) (parm);
1121 
1122 			/* check for error */
1123 			if (parm->err)
1124 				goto done;
1125 
1126 			if (buf) {
1127 				/*
1128 				 * Increment the endpoint refcount. This
1129 				 * basically prevents setting a new
1130 				 * configuration and alternate setting
1131 				 * when USB transfers are in use on
1132 				 * the given interface. Search the USB
1133 				 * code for "endpoint->refcount_alloc" if you
1134 				 * want more information.
1135 				 */
1136 				USB_BUS_LOCK(info->bus);
1137 				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1138 					parm->err = USB_ERR_INVAL;
1139 
1140 				xfer->endpoint->refcount_alloc++;
1141 
1142 				if (xfer->endpoint->refcount_alloc == 0)
1143 					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1144 				USB_BUS_UNLOCK(info->bus);
1145 
1146 				/*
1147 				 * Whenever we set ppxfer[] then we
1148 				 * also need to increment the
1149 				 * "setup_refcount":
1150 				 */
1151 				info->setup_refcount++;
1152 
1153 				/*
1154 				 * Transfer is successfully setup and
1155 				 * can be used:
1156 				 */
1157 				ppxfer[n] = xfer;
1158 			}
1159 
1160 			/* check for error */
1161 			if (parm->err)
1162 				goto done;
1163 		}
1164 
1165 		if (buf != NULL || parm->err != 0)
1166 			goto done;
1167 
1168 		/* if no transfers, nothing to do */
1169 		if (refcount == 0)
1170 			goto done;
1171 
1172 		/* align data properly */
1173 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1174 
1175 		/* store offset temporarily */
1176 		parm->size[1] = parm->size[0];
1177 
1178 		/*
1179 		 * The number of DMA tags required depends on
1180 		 * the number of endpoints. The current estimate
1181 		 * for maximum number of DMA tags per endpoint
1182 		 * is three:
1183 		 * 1) for loading memory
1184 		 * 2) for allocating memory
1185 		 * 3) for fixing memory [UHCI]
1186 		 */
1187 		parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1188 
1189 		/*
1190 		 * DMA tags for QH, TD, Data and more.
1191 		 */
1192 		parm->dma_tag_max += 8;
1193 
1194 		parm->dma_tag_p += parm->dma_tag_max;
1195 
1196 		parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1197 		    ((uint8_t *)0);
1198 
1199 		/* align data properly */
1200 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1201 
1202 		/* store offset temporarily */
1203 		parm->size[3] = parm->size[0];
1204 
1205 		parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1206 		    ((uint8_t *)0);
1207 
1208 		/* align data properly */
1209 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1210 
1211 		/* store offset temporarily */
1212 		parm->size[4] = parm->size[0];
1213 
1214 		parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1215 		    ((uint8_t *)0);
1216 
1217 		/* store end offset temporarily */
1218 		parm->size[5] = parm->size[0];
1219 
1220 		parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1221 		    ((uint8_t *)0);
1222 
1223 		/* store end offset temporarily */
1224 
1225 		parm->size[2] = parm->size[0];
1226 
1227 		/* align data properly */
1228 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1229 
1230 		parm->size[6] = parm->size[0];
1231 
1232 		parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1233 		    ((uint8_t *)0);
1234 
1235 		/* align data properly */
1236 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1237 
1238 		/* allocate zeroed memory */
1239 		buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1240 
1241 		if (buf == NULL) {
1242 			parm->err = USB_ERR_NOMEM;
1243 			DPRINTFN(0, "cannot allocate memory block for "
1244 			    "configuration (%d bytes)\n",
1245 			    parm->size[0]);
1246 			goto done;
1247 		}
1248 		parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1249 		parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1250 		parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1251 		parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1252 		parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1253 	}
1254 
1255 done:
1256 	if (buf) {
1257 		if (info->setup_refcount == 0) {
1258 			/*
1259 			 * "usbd_transfer_unsetup_sub" will unlock
1260 			 * the bus mutex before returning !
1261 			 */
1262 			USB_BUS_LOCK(info->bus);
1263 
1264 			/* something went wrong */
1265 			usbd_transfer_unsetup_sub(info, 0);
1266 		}
1267 	}
1268 
1269 	/* check if any errors happened */
1270 	if (parm->err)
1271 		usbd_transfer_unsetup(ppxfer, n_setup);
1272 
1273 	error = parm->err;
1274 
1275 	if (do_unlock)
1276 		usbd_ctrl_unlock(udev);
1277 
1278 	return (error);
1279 }
1280 
1281 /*------------------------------------------------------------------------*
1282  *	usbd_transfer_unsetup_sub - factored out code
1283  *------------------------------------------------------------------------*/
1284 static void
1285 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1286 {
1287 #if USB_HAVE_BUSDMA
1288 	struct usb_page_cache *pc;
1289 #endif
1290 
1291 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1292 
1293 	/* wait for any outstanding DMA operations */
1294 
1295 	if (needs_delay) {
1296 		usb_timeout_t temp;
1297 		temp = usbd_get_dma_delay(info->udev);
1298 		if (temp != 0) {
1299 			usb_pause_mtx(&info->bus->bus_mtx,
1300 			    USB_MS_TO_TICKS(temp));
1301 		}
1302 	}
1303 
1304 	/* make sure that our done messages are not queued anywhere */
1305 	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1306 
1307 	USB_BUS_UNLOCK(info->bus);
1308 
1309 #if USB_HAVE_BUSDMA
1310 	/* free DMA'able memory, if any */
1311 	pc = info->dma_page_cache_start;
1312 	while (pc != info->dma_page_cache_end) {
1313 		usb_pc_free_mem(pc);
1314 		pc++;
1315 	}
1316 
1317 	/* free DMA maps in all "xfer->frbuffers" */
1318 	pc = info->xfer_page_cache_start;
1319 	while (pc != info->xfer_page_cache_end) {
1320 		usb_pc_dmamap_destroy(pc);
1321 		pc++;
1322 	}
1323 
1324 	/* free all DMA tags */
1325 	usb_dma_tag_unsetup(&info->dma_parent_tag);
1326 #endif
1327 
1328 	cv_destroy(&info->cv_drain);
1329 
1330 	/*
1331 	 * free the "memory_base" last, since the "info" structure is
1332 	 * contained within the "memory_base"!
1333 	 */
1334 	free(info->memory_base, M_USB);
1335 }
1336 
1337 /*------------------------------------------------------------------------*
1338  *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1339  *
1340  * NOTE: All USB transfers in progress will get called back passing
1341  * the error code "USB_ERR_CANCELLED" before this function
1342  * returns.
1343  *------------------------------------------------------------------------*/
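/*
 * Typical usage (hypothetical driver code): this is the counterpart to
 * usbd_transfer_setup() and is usually called from the driver's detach
 * routine, without the driver's private mutex held, because this
 * function may sleep:
 *
 *	usbd_transfer_unsetup(sc->sc_xfer, MY_N_TRANSFER);
 *
 * "sc->sc_xfer" and "MY_N_TRANSFER" are placeholders.
 */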
1344 void
1345 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1346 {
1347 	struct usb_xfer *xfer;
1348 	struct usb_xfer_root *info;
1349 	uint8_t needs_delay = 0;
1350 
1351 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1352 	    "usbd_transfer_unsetup can sleep!");
1353 
1354 	while (n_setup--) {
1355 		xfer = pxfer[n_setup];
1356 
1357 		if (xfer == NULL)
1358 			continue;
1359 
1360 		info = xfer->xroot;
1361 
1362 		USB_XFER_LOCK(xfer);
1363 		USB_BUS_LOCK(info->bus);
1364 
1365 		/*
1366 		 * HINT: when you start/stop a transfer, it might be a
1367 		 * good idea to directly use the "pxfer[]" structure:
1368 		 *
1369 		 * usbd_transfer_start(sc->pxfer[0]);
1370 		 * usbd_transfer_stop(sc->pxfer[0]);
1371 		 *
1372 		 * That way, if your code has many parts that will not
1373 		 * stop running under the same lock, in other words
1374 		 * "xfer_mtx", the usbd_transfer_start and
1375 		 * usbd_transfer_stop functions will simply return
1376 		 * when they detect a NULL pointer argument.
1377 		 *
1378 		 * To avoid any races we clear the "pxfer[]" pointer
1379 		 * while holding the private mutex of the driver:
1380 		 */
1381 		pxfer[n_setup] = NULL;
1382 
1383 		USB_BUS_UNLOCK(info->bus);
1384 		USB_XFER_UNLOCK(xfer);
1385 
1386 		usbd_transfer_drain(xfer);
1387 
1388 #if USB_HAVE_BUSDMA
1389 		if (xfer->flags_int.bdma_enable)
1390 			needs_delay = 1;
1391 #endif
1392 		/*
1393 		 * NOTE: default endpoint does not have an
1394 		 * interface, even if endpoint->iface_index == 0
1395 		 */
1396 		USB_BUS_LOCK(info->bus);
1397 		xfer->endpoint->refcount_alloc--;
1398 		USB_BUS_UNLOCK(info->bus);
1399 
1400 		usb_callout_drain(&xfer->timeout_handle);
1401 
1402 		USB_BUS_LOCK(info->bus);
1403 
1404 		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1405 		    "reference count\n"));
1406 
1407 		info->setup_refcount--;
1408 
1409 		if (info->setup_refcount == 0) {
1410 			usbd_transfer_unsetup_sub(info,
1411 			    needs_delay);
1412 		} else {
1413 			USB_BUS_UNLOCK(info->bus);
1414 		}
1415 	}
1416 }
1417 
1418 /*------------------------------------------------------------------------*
1419  *	usbd_control_transfer_init - factored out code
1420  *
1421  * In USB Device Mode we have to wait for the SETUP packet, which
1422  * contains the "struct usb_device_request" structure, before we can
1423  * transfer any data. In USB Host Mode we already have the SETUP
1424  * packet at the moment the USB transfer is started. This leads us to
1425  * having to setup the USB transfer at two different places in
1426  * time. This function just contains factored out control transfer
1427  * initialisation code, so that we don't duplicate the code.
1428  *------------------------------------------------------------------------*/
1429 static void
1430 usbd_control_transfer_init(struct usb_xfer *xfer)
1431 {
1432 	struct usb_device_request req;
1433 
1434 	/* copy out the USB request header */
1435 
1436 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1437 
1438 	/* setup remainder */
1439 
1440 	xfer->flags_int.control_rem = UGETW(req.wLength);
1441 
1442 	/* copy direction to endpoint variable */
1443 
1444 	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1445 	xfer->endpointno |=
1446 	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1447 }
1448 
1449 /*------------------------------------------------------------------------*
1450  *	usbd_control_transfer_did_data
1451  *
1452  * This function returns non-zero if a control endpoint has
1453  * transferred the first DATA packet after the SETUP packet.
1454  * Else it returns zero.
1455  *------------------------------------------------------------------------*/
1456 static uint8_t
1457 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1458 {
1459 	struct usb_device_request req;
1460 
1461 	/* SETUP packet is not yet sent */
1462 	if (xfer->flags_int.control_hdr != 0)
1463 		return (0);
1464 
1465 	/* copy out the USB request header */
1466 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1467 
1468 	/* compare remainder to the initial value */
1469 	return (xfer->flags_int.control_rem != UGETW(req.wLength));
1470 }
1471 
1472 /*------------------------------------------------------------------------*
1473  *	usbd_setup_ctrl_transfer
1474  *
1475  * This function handles initialisation of control transfers. Control
1476  * transfers are special in that regard that they can both transmit
1477  * and receive data.
1478  *
1479  * Return values:
1480  *    0: Success
1481  * Else: Failure
1482  *------------------------------------------------------------------------*/
1483 static int
1484 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1485 {
1486 	usb_frlength_t len;
1487 
1488 	/* Check for control endpoint stall */
1489 	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1490 		/* the control transfer is no longer active */
1491 		xfer->flags_int.control_stall = 1;
1492 		xfer->flags_int.control_act = 0;
1493 	} else {
1494 		/* don't stall control transfer by default */
1495 		xfer->flags_int.control_stall = 0;
1496 	}
1497 
1498 	/* Check for invalid number of frames */
1499 	if (xfer->nframes > 2) {
1500 		/*
1501 		 * If you need to split a control transfer, you
1502 		 * have to do one part at a time. Only with
1503 		 * non-control transfers can you do multiple
1504 		 * parts at a time.
1505 		 */
1506 		DPRINTFN(0, "Too many frames: %u\n",
1507 		    (unsigned int)xfer->nframes);
1508 		goto error;
1509 	}
1510 
1511 	/*
1512          * Check if there is a control
1513          * transfer in progress:
1514          */
1515 	if (xfer->flags_int.control_act) {
1516 
1517 		if (xfer->flags_int.control_hdr) {
1518 
1519 			/* clear send header flag */
1520 
1521 			xfer->flags_int.control_hdr = 0;
1522 
1523 			/* setup control transfer */
1524 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1525 				usbd_control_transfer_init(xfer);
1526 			}
1527 		}
1528 		/* get data length */
1529 
1530 		len = xfer->sumlen;
1531 
1532 	} else {
1533 
1534 		/* the size of the SETUP structure is hardcoded ! */
1535 
1536 		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1537 			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1538 			    xfer->frlengths[0], sizeof(struct
1539 			    usb_device_request));
1540 			goto error;
1541 		}
1542 		/* check USB mode */
1543 		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1544 
1545 			/* check number of frames */
1546 			if (xfer->nframes != 1) {
1547 				/*
1548 			         * We need to receive the setup
1549 			         * message first so that we know the
1550 			         * data direction!
1551 			         */
1552 				DPRINTF("Misconfigured transfer\n");
1553 				goto error;
1554 			}
1555 			/*
1556 			 * Set a dummy "control_rem" value.  This
1557 			 * variable will be overwritten later by a
1558 			 * call to "usbd_control_transfer_init()" !
1559 			 */
1560 			xfer->flags_int.control_rem = 0xFFFF;
1561 		} else {
1562 
1563 			/* setup "endpoint" and "control_rem" */
1564 
1565 			usbd_control_transfer_init(xfer);
1566 		}
1567 
1568 		/* set transfer-header flag */
1569 
1570 		xfer->flags_int.control_hdr = 1;
1571 
1572 		/* get data length */
1573 
1574 		len = (xfer->sumlen - sizeof(struct usb_device_request));
1575 	}
1576 
1577 	/* update did data flag */
1578 
1579 	xfer->flags_int.control_did_data =
1580 	    usbd_control_transfer_did_data(xfer);
1581 
1582 	/* check if there is a length mismatch */
1583 
1584 	if (len > xfer->flags_int.control_rem) {
1585 		DPRINTFN(0, "Length (%d) greater than "
1586 		    "remaining length (%d)\n", len,
1587 		    xfer->flags_int.control_rem);
1588 		goto error;
1589 	}
1590 	/* check if we are doing a short transfer */
1591 
1592 	if (xfer->flags.force_short_xfer) {
1593 		xfer->flags_int.control_rem = 0;
1594 	} else {
1595 		if ((len != xfer->max_data_length) &&
1596 		    (len != xfer->flags_int.control_rem) &&
1597 		    (xfer->nframes != 1)) {
1598 			DPRINTFN(0, "Short control transfer without "
1599 			    "force_short_xfer set\n");
1600 			goto error;
1601 		}
1602 		xfer->flags_int.control_rem -= len;
1603 	}
1604 
1605 	/* the status part is executed when "control_act" is 0 */
1606 
1607 	if ((xfer->flags_int.control_rem > 0) ||
1608 	    (xfer->flags.manual_status)) {
1609 		/* don't execute the STATUS stage yet */
1610 		xfer->flags_int.control_act = 1;
1611 
1612 		/* sanity check */
1613 		if ((!xfer->flags_int.control_hdr) &&
1614 		    (xfer->nframes == 1)) {
1615 			/*
1616 		         * This is not a valid operation!
1617 		         */
1618 			DPRINTFN(0, "Invalid parameter "
1619 			    "combination\n");
1620 			goto error;
1621 		}
1622 	} else {
1623 		/* time to execute the STATUS stage */
1624 		xfer->flags_int.control_act = 0;
1625 	}
1626 	return (0);			/* success */
1627 
1628 error:
1629 	return (1);			/* failure */
1630 }
1631 
1632 /*------------------------------------------------------------------------*
1633  *	usbd_transfer_submit - start USB hardware for the given transfer
1634  *
1635  * This function should only be called from the USB callback.
1636  *------------------------------------------------------------------------*/
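/*
 * Sketch of a typical caller (hypothetical driver callback, not part of
 * this file), following the usual USB_ST_SETUP / USB_ST_TRANSFERRED
 * pattern:
 *
 *	static void
 *	my_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */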
1637 void
1638 usbd_transfer_submit(struct usb_xfer *xfer)
1639 {
1640 	struct usb_xfer_root *info;
1641 	struct usb_bus *bus;
1642 	usb_frcount_t x;
1643 
1644 	info = xfer->xroot;
1645 	bus = info->bus;
1646 
1647 	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1648 	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1649 	    "read" : "write");
1650 
1651 #ifdef USB_DEBUG
1652 	if (USB_DEBUG_VAR > 0) {
1653 		USB_BUS_LOCK(bus);
1654 
1655 		usb_dump_endpoint(xfer->endpoint);
1656 
1657 		USB_BUS_UNLOCK(bus);
1658 	}
1659 #endif
1660 
1661 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1662 	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1663 
1664 	/* Only open the USB transfer once! */
1665 	if (!xfer->flags_int.open) {
1666 		xfer->flags_int.open = 1;
1667 
1668 		DPRINTF("open\n");
1669 
1670 		USB_BUS_LOCK(bus);
1671 		(xfer->endpoint->methods->open) (xfer);
1672 		USB_BUS_UNLOCK(bus);
1673 	}
1674 	/* set "transferring" flag */
1675 	xfer->flags_int.transferring = 1;
1676 
1677 #if USB_HAVE_POWERD
1678 	/* increment power reference */
1679 	usbd_transfer_power_ref(xfer, 1);
1680 #endif
1681 	/*
1682 	 * Check if the transfer is waiting on a queue, most
1683 	 * frequently the "done_q":
1684 	 */
1685 	if (xfer->wait_queue) {
1686 		USB_BUS_LOCK(bus);
1687 		usbd_transfer_dequeue(xfer);
1688 		USB_BUS_UNLOCK(bus);
1689 	}
1690 	/* clear "did_dma_delay" flag */
1691 	xfer->flags_int.did_dma_delay = 0;
1692 
1693 	/* clear "did_close" flag */
1694 	xfer->flags_int.did_close = 0;
1695 
1696 #if USB_HAVE_BUSDMA
1697 	/* clear "bdma_setup" flag */
1698 	xfer->flags_int.bdma_setup = 0;
1699 #endif
1700 	/* by default we cannot cancel any USB transfer immediately */
1701 	xfer->flags_int.can_cancel_immed = 0;
1702 
1703 	/* clear lengths and frame counts by default */
1704 	xfer->sumlen = 0;
1705 	xfer->actlen = 0;
1706 	xfer->aframes = 0;
1707 
1708 	/* clear any previous errors */
1709 	xfer->error = 0;
1710 
1711 	/* Check if the device is still alive */
1712 	if (info->udev->state < USB_STATE_POWERED) {
1713 		USB_BUS_LOCK(bus);
1714 		/*
1715 		 * Must return cancelled error code else
1716 		 * device drivers can hang.
1717 		 */
1718 		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1719 		USB_BUS_UNLOCK(bus);
1720 		return;
1721 	}
1722 
1723 	/* sanity check */
1724 	if (xfer->nframes == 0) {
1725 		if (xfer->flags.stall_pipe) {
1726 			/*
1727 			 * Special case - want to stall without transferring
1728 			 * any data:
1729 			 */
1730 			DPRINTF("xfer=%p nframes=0: stall "
1731 			    "or clear stall!\n", xfer);
1732 			USB_BUS_LOCK(bus);
1733 			xfer->flags_int.can_cancel_immed = 1;
1734 			/* start the transfer */
1735 			usb_command_wrapper(&xfer->endpoint->
1736 			    endpoint_q[xfer->stream_id], xfer);
1737 			USB_BUS_UNLOCK(bus);
1738 			return;
1739 		}
1740 		USB_BUS_LOCK(bus);
1741 		usbd_transfer_done(xfer, USB_ERR_INVAL);
1742 		USB_BUS_UNLOCK(bus);
1743 		return;
1744 	}
1745 	/* compute some variables */
1746 
1747 	for (x = 0; x != xfer->nframes; x++) {
1748 		/* make a copy of the frlengths[] */
1749 		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1750 		/* compute total transfer length */
1751 		xfer->sumlen += xfer->frlengths[x];
1752 		if (xfer->sumlen < xfer->frlengths[x]) {
1753 			/* length wrapped around */
1754 			USB_BUS_LOCK(bus);
1755 			usbd_transfer_done(xfer, USB_ERR_INVAL);
1756 			USB_BUS_UNLOCK(bus);
1757 			return;
1758 		}
1759 	}
1760 
1761 	/* clear some internal flags */
1762 
1763 	xfer->flags_int.short_xfer_ok = 0;
1764 	xfer->flags_int.short_frames_ok = 0;
1765 
1766 	/* check if this is a control transfer */
1767 
1768 	if (xfer->flags_int.control_xfr) {
1769 
1770 		if (usbd_setup_ctrl_transfer(xfer)) {
1771 			USB_BUS_LOCK(bus);
1772 			usbd_transfer_done(xfer, USB_ERR_STALLED);
1773 			USB_BUS_UNLOCK(bus);
1774 			return;
1775 		}
1776 	}
1777 	/*
1778 	 * Setup filtered version of some transfer flags,
1779 	 * in case of data read direction
1780 	 */
1781 	if (USB_GET_DATA_ISREAD(xfer)) {
1782 
1783 		if (xfer->flags.short_frames_ok) {
1784 			xfer->flags_int.short_xfer_ok = 1;
1785 			xfer->flags_int.short_frames_ok = 1;
1786 		} else if (xfer->flags.short_xfer_ok) {
1787 			xfer->flags_int.short_xfer_ok = 1;
1788 
1789 			/* check for control transfer */
1790 			if (xfer->flags_int.control_xfr) {
1791 				/*
1792 				 * 1) Control transfers do not support
1793 				 * reception of multiple short USB
1794 				 * frames in host mode and device side
1795 				 * mode, with exception of:
1796 				 *
1797 				 * 2) Due to sometimes buggy device
1798 				 * side firmware we need to do a
1799 				 * STATUS stage in case of short
1800 				 * control transfers in USB host mode.
1801 				 * The STATUS stage then becomes the
1802 				 * "alt_next" to the DATA stage.
1803 				 */
1804 				xfer->flags_int.short_frames_ok = 1;
1805 			}
1806 		}
1807 	}
1808 	/*
1809 	 * Check if BUS-DMA support is enabled and try to load virtual
1810 	 * buffers into DMA, if any:
1811 	 */
1812 #if USB_HAVE_BUSDMA
1813 	if (xfer->flags_int.bdma_enable) {
1814 		/* insert the USB transfer last in the BUS-DMA queue */
1815 		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1816 		return;
1817 	}
1818 #endif
1819 	/*
1820 	 * Enter the USB transfer into the Host Controller or
1821 	 * Device Controller schedule:
1822 	 */
1823 	usbd_pipe_enter(xfer);
1824 }
1825 
1826 /*------------------------------------------------------------------------*
1827  *	usbd_pipe_enter - factored out code
1828  *------------------------------------------------------------------------*/
1829 void
1830 usbd_pipe_enter(struct usb_xfer *xfer)
1831 {
1832 	struct usb_endpoint *ep;
1833 
1834 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1835 
1836 	USB_BUS_LOCK(xfer->xroot->bus);
1837 
1838 	ep = xfer->endpoint;
1839 
1840 	DPRINTF("enter\n");
1841 
1842 	/* the transfer can now be cancelled */
1843 	xfer->flags_int.can_cancel_immed = 1;
1844 
1845 	/* enter the transfer */
1846 	(ep->methods->enter) (xfer);
1847 
1848 	/* check for transfer error */
1849 	if (xfer->error) {
1850 		/* some error has happened */
1851 		usbd_transfer_done(xfer, 0);
1852 		USB_BUS_UNLOCK(xfer->xroot->bus);
1853 		return;
1854 	}
1855 
1856 	/* start the transfer */
1857 	usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1858 	USB_BUS_UNLOCK(xfer->xroot->bus);
1859 }
1860 
1861 /*------------------------------------------------------------------------*
1862  *	usbd_transfer_start - start a USB transfer
1863  *
1864  * NOTE: Calling this function more than one time will only
1865  *       result in a single transfer start, until the USB transfer
1866  *       completes.
1867  *------------------------------------------------------------------------*/
1868 void
1869 usbd_transfer_start(struct usb_xfer *xfer)
1870 {
1871 	if (xfer == NULL) {
1872 		/* transfer is gone */
1873 		return;
1874 	}
1875 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1876 
1877 	/* mark the USB transfer started */
1878 
1879 	if (!xfer->flags_int.started) {
1880 		/* lock the BUS lock to avoid races updating flags_int */
1881 		USB_BUS_LOCK(xfer->xroot->bus);
1882 		xfer->flags_int.started = 1;
1883 		USB_BUS_UNLOCK(xfer->xroot->bus);
1884 	}
1885 	/* check if the USB transfer callback is already transferring */
1886 
1887 	if (xfer->flags_int.transferring) {
1888 		return;
1889 	}
1890 	USB_BUS_LOCK(xfer->xroot->bus);
1891 	/* call the USB transfer callback */
1892 	usbd_callback_ss_done_defer(xfer);
1893 	USB_BUS_UNLOCK(xfer->xroot->bus);
1894 }
1895 
1896 /*------------------------------------------------------------------------*
1897  *	usbd_transfer_stop - stop a USB transfer
1898  *
1899  * NOTE: Calling this function more than one time will only
1900  *       result in a single transfer stop.
1901  * NOTE: When this function returns it is not safe to free nor
1902  *       reuse any DMA buffers. See "usbd_transfer_drain()".
1903  *------------------------------------------------------------------------*/
1904 void
1905 usbd_transfer_stop(struct usb_xfer *xfer)
1906 {
1907 	struct usb_endpoint *ep;
1908 
1909 	if (xfer == NULL) {
1910 		/* transfer is gone */
1911 		return;
1912 	}
1913 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1914 
1915 	/* check if the USB transfer was ever opened */
1916 
1917 	if (!xfer->flags_int.open) {
1918 		if (xfer->flags_int.started) {
1919 			/* nothing to do except clearing the "started" flag */
1920 			/* lock the BUS lock to avoid races updating flags_int */
1921 			USB_BUS_LOCK(xfer->xroot->bus);
1922 			xfer->flags_int.started = 0;
1923 			USB_BUS_UNLOCK(xfer->xroot->bus);
1924 		}
1925 		return;
1926 	}
1927 	/* try to stop the current USB transfer */
1928 
1929 	USB_BUS_LOCK(xfer->xroot->bus);
1930 	/* override any previous error */
1931 	xfer->error = USB_ERR_CANCELLED;
1932 
1933 	/*
1934 	 * Clear "open" and "started" while both the private and USB locks
1935 	 * are held so that we don't get a race updating "flags_int".
1936 	 */
1937 	xfer->flags_int.open = 0;
1938 	xfer->flags_int.started = 0;
1939 
1940 	/*
1941 	 * Check if we can cancel the USB transfer immediately.
1942 	 */
1943 	if (xfer->flags_int.transferring) {
1944 		if (xfer->flags_int.can_cancel_immed &&
1945 		    (!xfer->flags_int.did_close)) {
1946 			DPRINTF("close\n");
1947 			/*
1948 			 * The following will lead to a USB_ERR_CANCELLED
1949 			 * error code being passed to the USB callback.
1950 			 */
1951 			(xfer->endpoint->methods->close) (xfer);
1952 			/* only close once */
1953 			xfer->flags_int.did_close = 1;
1954 		} else {
1955 			/* need to wait for the next done callback */
1956 		}
1957 	} else {
1958 		DPRINTF("close\n");
1959 
1960 		/* close here and now */
1961 		(xfer->endpoint->methods->close) (xfer);
1962 
1963 		/*
1964 		 * Any additional DMA delay is done by
1965 		 * "usbd_transfer_unsetup()".
1966 		 */
1967 
1968 		/*
1969 		 * Special case. Check if we need to restart a blocked
1970 		 * endpoint.
1971 		 */
1972 		ep = xfer->endpoint;
1973 
1974 		/*
1975 		 * If the current USB transfer is completing we need
1976 		 * to start the next one:
1977 		 */
1978 		if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1979 			usb_command_wrapper(
1980 			    &ep->endpoint_q[xfer->stream_id], NULL);
1981 		}
1982 	}
1983 
1984 	USB_BUS_UNLOCK(xfer->xroot->bus);
1985 }
1986 
1987 /*------------------------------------------------------------------------*
1988  *	usbd_transfer_pending
1989  *
1990  * This function will check if a USB transfer is pending, which is a
1991  * little bit complicated!
1992  * Return values:
1993  * 0: Not pending
1994  * 1: Pending: The USB transfer will receive a callback in the future.
1995  *------------------------------------------------------------------------*/
1996 uint8_t
1997 usbd_transfer_pending(struct usb_xfer *xfer)
1998 {
1999 	struct usb_xfer_root *info;
2000 	struct usb_xfer_queue *pq;
2001 
2002 	if (xfer == NULL) {
2003 		/* transfer is gone */
2004 		return (0);
2005 	}
2006 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2007 
2008 	if (xfer->flags_int.transferring) {
2009 		/* trivial case */
2010 		return (1);
2011 	}
2012 	USB_BUS_LOCK(xfer->xroot->bus);
2013 	if (xfer->wait_queue) {
2014 		/* we are waiting on a queue somewhere */
2015 		USB_BUS_UNLOCK(xfer->xroot->bus);
2016 		return (1);
2017 	}
2018 	info = xfer->xroot;
2019 	pq = &info->done_q;
2020 
2021 	if (pq->curr == xfer) {
2022 		/* we are currently scheduled for callback */
2023 		USB_BUS_UNLOCK(xfer->xroot->bus);
2024 		return (1);
2025 	}
2026 	/* we are not pending */
2027 	USB_BUS_UNLOCK(xfer->xroot->bus);
2028 	return (0);
2029 }
2030 
2031 /*------------------------------------------------------------------------*
2032  *	usbd_transfer_drain
2033  *
2034  * This function will stop the USB transfer and wait for any
2035  * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2036  * are loaded into DMA can safely be freed or reused after this
2037  * function has returned.
2038  *------------------------------------------------------------------------*/
2039 void
2040 usbd_transfer_drain(struct usb_xfer *xfer)
2041 {
2042 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2043 	    "usbd_transfer_drain can sleep!");
2044 
2045 	if (xfer == NULL) {
2046 		/* transfer is gone */
2047 		return;
2048 	}
2049 	if (xfer->xroot->xfer_mtx != &Giant) {
2050 		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2051 	}
2052 	USB_XFER_LOCK(xfer);
2053 
2054 	usbd_transfer_stop(xfer);
2055 
2056 	while (usbd_transfer_pending(xfer) ||
2057 	    xfer->flags_int.doing_callback) {
2058 
2059 		/*
2060 		 * The callback is allowed to drop its
2061 		 * transfer mutex. In that case checking only
2062 		 * "usbd_transfer_pending()" is not enough to tell if
2063 		 * the USB transfer is fully drained. We also need to
2064 		 * check the internal "doing_callback" flag.
2065 		 */
2066 		xfer->flags_int.draining = 1;
2067 
2068 		/*
2069 		 * Wait until the current outstanding USB
2070 		 * transfer is complete!
2071 		 */
2072 		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2073 	}
2074 	USB_XFER_UNLOCK(xfer);
2075 }
2076 
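/*
 * Example (illustrative sketch; the "sc" field names are
 * hypothetical): before freeing or reusing a buffer that was loaded
 * into DMA, drain the transfer with the transfer mutex not held:
 *
 *	usbd_transfer_drain(sc->sc_xfer[MY_BULK_WR_XFER]);
 *	free(sc->sc_bounce_buf, M_DEVBUF);
 *
 * "usbd_transfer_stop()" alone is not sufficient for this purpose;
 * see the NOTE above "usbd_transfer_stop()".
 */
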
2077 struct usb_page_cache *
2078 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2079 {
2080 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2081 
2082 	return (&xfer->frbuffers[frindex]);
2083 }
2084 
2085 void *
2086 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2087 {
2088 	struct usb_page_search page_info;
2089 
2090 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2091 
2092 	usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2093 	return (page_info.buffer);
2094 }
2095 
2096 /*------------------------------------------------------------------------*
2097  *	usbd_xfer_get_fps_shift
2098  *
2099  * The following function is only useful for isochronous transfers. It
2100  * returns how many times the frame execution rate has been shifted
2101  * down.
2102  *
2103  * Return value:
2104  * Success: 0..3
2105  * Failure: 0
2106  *------------------------------------------------------------------------*/
2107 uint8_t
2108 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2109 {
2110 	return (xfer->fps_shift);
2111 }
2112 
2113 usb_frlength_t
2114 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2115 {
2116 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2117 
2118 	return (xfer->frlengths[frindex]);
2119 }
2120 
2121 /*------------------------------------------------------------------------*
2122  *	usbd_xfer_set_frame_data
2123  *
2124  * This function sets the pointer of the buffer that should be
2125  * loaded directly into DMA for the given USB frame. Passing "ptr"
2126  * equal to NULL while the corresponding "frlength" is greater
2127  * than zero gives undefined results!
2128  *------------------------------------------------------------------------*/
2129 void
2130 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2131     void *ptr, usb_frlength_t len)
2132 {
2133 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2134 
2135 	/* set virtual address to load and length */
2136 	xfer->frbuffers[frindex].buffer = ptr;
2137 	usbd_xfer_set_frame_len(xfer, frindex, len);
2138 }
2139 
2140 void
2141 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2142     void **ptr, int *len)
2143 {
2144 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2145 
2146 	if (ptr != NULL)
2147 		*ptr = xfer->frbuffers[frindex].buffer;
2148 	if (len != NULL)
2149 		*len = xfer->frlengths[frindex];
2150 }
2151 
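/*
 * Example (illustrative sketch; "sc" and its buffer fields are
 * hypothetical): with the ".ext_buffer" configuration flag set, a
 * callback typically points frame zero at a driver buffer before
 * submitting, and reads the result back once transferred:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_buf, sc->sc_buflen);
 *		usbd_transfer_submit(xfer);
 *		break;
 *	case USB_ST_TRANSFERRED:
 *		usbd_xfer_frame_data(xfer, 0, &ptr, &len);
 *		// consume "len" bytes starting at "ptr"
 *		break;
 */
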
2152 /*------------------------------------------------------------------------*
2153  *	usbd_xfer_old_frame_length
2154  *
2155  * This function returns the framelength of the given frame at the
2156  * time the transfer was submitted. This function can be used to
2157  * compute the starting data pointer of the next isochronous frame
2158  * when an isochronous transfer has completed.
2159  *------------------------------------------------------------------------*/
2160 usb_frlength_t
2161 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2162 {
2163 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2164 
2165 	return (xfer->frlengths[frindex + xfer->max_frame_count]);
2166 }
2167 
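/*
 * Example (illustrative sketch): when walking the data of a completed
 * isochronous IN transfer, the data of frame "x" starts where the
 * frame was originally laid out, so the offset is advanced by the old
 * frame length while the actual byte count comes from
 * "usbd_xfer_frame_len()":
 *
 *	offset = 0;
 *	for (x = 0; x != nframes; x++) {
 *		len = usbd_xfer_frame_len(xfer, x);
 *		// consume "len" bytes at "offset" within the buffer
 *		offset += usbd_xfer_old_frame_length(xfer, x);
 *	}
 */
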
2168 void
2169 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2170     int *nframes)
2171 {
2172 	if (actlen != NULL)
2173 		*actlen = xfer->actlen;
2174 	if (sumlen != NULL)
2175 		*sumlen = xfer->sumlen;
2176 	if (aframes != NULL)
2177 		*aframes = xfer->aframes;
2178 	if (nframes != NULL)
2179 		*nframes = xfer->nframes;
2180 }
2181 
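/*
 * Example (illustrative sketch): in the USB_ST_TRANSFERRED state a
 * callback often only needs the actual transfer length; unused output
 * pointers may be passed as NULL:
 *
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 */
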
2182 /*------------------------------------------------------------------------*
2183  *	usbd_xfer_set_frame_offset
2184  *
2185  * This function sets the frame data buffer offset relative to the beginning
2186  * of the USB DMA buffer allocated for this USB transfer.
2187  *------------------------------------------------------------------------*/
2188 void
2189 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2190     usb_frcount_t frindex)
2191 {
2192 	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2193 	    "when the USB buffer is external\n"));
2194 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2195 
2196 	/* set virtual address to load */
2197 	xfer->frbuffers[frindex].buffer =
2198 	    USB_ADD_BYTES(xfer->local_buffer, offset);
2199 }
2200 
2201 void
2202 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2203 {
2204 	xfer->interval = i;
2205 }
2206 
2207 void
2208 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2209 {
2210 	xfer->timeout = t;
2211 }
2212 
2213 void
2214 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2215 {
2216 	xfer->nframes = n;
2217 }
2218 
2219 usb_frcount_t
2220 usbd_xfer_max_frames(struct usb_xfer *xfer)
2221 {
2222 	return (xfer->max_frame_count);
2223 }
2224 
2225 usb_frlength_t
2226 usbd_xfer_max_len(struct usb_xfer *xfer)
2227 {
2228 	return (xfer->max_data_length);
2229 }
2230 
2231 usb_frlength_t
2232 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2233 {
2234 	return (xfer->max_frame_size);
2235 }
2236 
2237 void
2238 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2239     usb_frlength_t len)
2240 {
2241 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2242 
2243 	xfer->frlengths[frindex] = len;
2244 }
2245 
2246 /*------------------------------------------------------------------------*
2247  *	usb_callback_proc - factored out code
2248  *
2249  * This function performs USB callbacks.
2250  *------------------------------------------------------------------------*/
2251 static void
2252 usb_callback_proc(struct usb_proc_msg *_pm)
2253 {
2254 	struct usb_done_msg *pm = (void *)_pm;
2255 	struct usb_xfer_root *info = pm->xroot;
2256 
2257 	/* Change locking order */
2258 	USB_BUS_UNLOCK(info->bus);
2259 
2260 	/*
2261 	 * We exploit the fact that the mutex is the same for all
2262 	 * callbacks that will be called from this thread:
2263 	 */
2264 	USB_MTX_LOCK(info->xfer_mtx);
2265 	USB_BUS_LOCK(info->bus);
2266 
2267 	/* Continue where we lost track */
2268 	usb_command_wrapper(&info->done_q,
2269 	    info->done_q.curr);
2270 
2271 	USB_MTX_UNLOCK(info->xfer_mtx);
2272 }
2273 
2274 /*------------------------------------------------------------------------*
2275  *	usbd_callback_ss_done_defer
2276  *
2277  * This function will defer the start, stop and done callback to the
2278  * correct thread.
2279  *------------------------------------------------------------------------*/
2280 static void
2281 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2282 {
2283 	struct usb_xfer_root *info = xfer->xroot;
2284 	struct usb_xfer_queue *pq = &info->done_q;
2285 
2286 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2287 
2288 	if (pq->curr != xfer) {
2289 		usbd_transfer_enqueue(pq, xfer);
2290 	}
2291 	if (!pq->recurse_1) {
2292 
2293 		/*
2294 	         * We have to postpone the callback, because we would
2295 	         * get a Lock Order Reversal, LOR, if we tried to
2296 	         * proceed!
2297 	         */
2298 		(void) usb_proc_msignal(info->done_p,
2299 		    &info->done_m[0], &info->done_m[1]);
2300 	} else {
2301 		/* clear second recurse flag */
2302 		pq->recurse_2 = 0;
2303 	}
2304 	return;
2305 
2306 }
2307 
2308 /*------------------------------------------------------------------------*
2309  *	usbd_callback_wrapper
2310  *
2311  * This is a wrapper for USB callbacks. This wrapper does some
2312  * auto-magic things like figuring out if we can call the callback
2313  * directly from the current context or if we need to wake up the
2314  * interrupt process.
2315  *------------------------------------------------------------------------*/
2316 static void
2317 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2318 {
2319 	struct usb_xfer *xfer = pq->curr;
2320 	struct usb_xfer_root *info = xfer->xroot;
2321 
2322 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2323 	if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2324 	    USB_IN_POLLING_MODE_FUNC() == 0) {
2325 		/*
2326 		 * Cases that end up here:
2327 		 *
2328 		 * 5) HW interrupt done callback or other source.
2329 		 * 6) HW completed transfer during callback
2330 		 */
2331 		DPRINTFN(3, "case 5 and 6\n");
2332 
2333 		/*
2334 	         * We have to postpone the callback, because we would
2335 	         * get a Lock Order Reversal, LOR, if we tried to
2336 	         * proceed!
2337 		 *
2338 		 * Postponing the callback also ensures that other USB
2339 		 * transfer queues get a chance.
2340 	         */
2341 		(void) usb_proc_msignal(info->done_p,
2342 		    &info->done_m[0], &info->done_m[1]);
2343 		return;
2344 	}
2345 	/*
2346 	 * Cases that end up here:
2347 	 *
2348 	 * 1) We are starting a transfer
2349 	 * 2) We are prematurely calling back a transfer
2350 	 * 3) We are stopping a transfer
2351 	 * 4) We are doing an ordinary callback
2352 	 */
2353 	DPRINTFN(3, "case 1-4\n");
2354 	/* get next USB transfer in the queue */
2355 	info->done_q.curr = NULL;
2356 
2357 	/* set flag in case of drain */
2358 	xfer->flags_int.doing_callback = 1;
2359 
2360 	USB_BUS_UNLOCK(info->bus);
2361 	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2362 
2363 	/* set correct USB state for callback */
2364 	if (!xfer->flags_int.transferring) {
2365 		xfer->usb_state = USB_ST_SETUP;
2366 		if (!xfer->flags_int.started) {
2367 			/* we got stopped before we even got started */
2368 			USB_BUS_LOCK(info->bus);
2369 			goto done;
2370 		}
2371 	} else {
2372 
2373 		if (usbd_callback_wrapper_sub(xfer)) {
2374 			/* the callback has been deferred */
2375 			USB_BUS_LOCK(info->bus);
2376 			goto done;
2377 		}
2378 #if USB_HAVE_POWERD
2379 		/* decrement power reference */
2380 		usbd_transfer_power_ref(xfer, -1);
2381 #endif
2382 		xfer->flags_int.transferring = 0;
2383 
2384 		if (xfer->error) {
2385 			xfer->usb_state = USB_ST_ERROR;
2386 		} else {
2387 			/* set transferred state */
2388 			xfer->usb_state = USB_ST_TRANSFERRED;
2389 #if USB_HAVE_BUSDMA
2390 			/* sync DMA memory, if any */
2391 			if (xfer->flags_int.bdma_enable &&
2392 			    (!xfer->flags_int.bdma_no_post_sync)) {
2393 				usb_bdma_post_sync(xfer);
2394 			}
2395 #endif
2396 		}
2397 	}
2398 
2399 #if USB_HAVE_PF
2400 	if (xfer->usb_state != USB_ST_SETUP) {
2401 		USB_BUS_LOCK(info->bus);
2402 		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2403 		USB_BUS_UNLOCK(info->bus);
2404 	}
2405 #endif
2406 	/* call processing routine */
2407 	(xfer->callback) (xfer, xfer->error);
2408 
2409 	/* pick up the USB mutex again */
2410 	USB_BUS_LOCK(info->bus);
2411 
2412 	/*
2413 	 * Check if we got started after we got cancelled, but
2414 	 * before we managed to do the callback.
2415 	 */
2416 	if ((!xfer->flags_int.open) &&
2417 	    (xfer->flags_int.started) &&
2418 	    (xfer->usb_state == USB_ST_ERROR)) {
2419 		/* clear flag in case of drain */
2420 		xfer->flags_int.doing_callback = 0;
2421 		/* try to loop, but not recursively */
2422 		usb_command_wrapper(&info->done_q, xfer);
2423 		return;
2424 	}
2425 
2426 done:
2427 	/* clear flag in case of drain */
2428 	xfer->flags_int.doing_callback = 0;
2429 
2430 	/*
2431 	 * Check if we are draining.
2432 	 */
2433 	if (xfer->flags_int.draining &&
2434 	    (!xfer->flags_int.transferring)) {
2435 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2436 		xfer->flags_int.draining = 0;
2437 		cv_broadcast(&info->cv_drain);
2438 	}
2439 
2440 	/* do the next callback, if any */
2441 	usb_command_wrapper(&info->done_q,
2442 	    info->done_q.curr);
2443 }
2444 
2445 /*------------------------------------------------------------------------*
2446  *	usb_dma_delay_done_cb
2447  *
2448  * This function is called when the DMA delay has been executed, and
2449  * will make sure that the callback is called to complete the USB
2450  * transfer. This code path is usually only used when there is a USB
2451  * error like USB_ERR_CANCELLED.
2452  *------------------------------------------------------------------------*/
2453 void
2454 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2455 {
2456 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2457 
2458 	DPRINTFN(3, "Completed %p\n", xfer);
2459 
2460 	/* queue callback for execution, again */
2461 	usbd_transfer_done(xfer, 0);
2462 }
2463 
2464 /*------------------------------------------------------------------------*
2465  *	usbd_transfer_dequeue
2466  *
2467  *  - This function is used to remove a USB transfer from a USB
2468  *  transfer queue.
2469  *
2470  *  - This function can be called multiple times in a row.
2471  *------------------------------------------------------------------------*/
2472 void
2473 usbd_transfer_dequeue(struct usb_xfer *xfer)
2474 {
2475 	struct usb_xfer_queue *pq;
2476 
2477 	pq = xfer->wait_queue;
2478 	if (pq) {
2479 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2480 		xfer->wait_queue = NULL;
2481 	}
2482 }
2483 
2484 /*------------------------------------------------------------------------*
2485  *	usbd_transfer_enqueue
2486  *
2487  *  - This function is used to insert a USB transfer into a USB
2488  *  transfer queue.
2489  *
2490  *  - This function can be called multiple times in a row.
2491  *------------------------------------------------------------------------*/
2492 void
2493 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2494 {
2495 	/*
2496 	 * Insert the USB transfer into the queue, if it is not
2497 	 * already on a USB transfer queue:
2498 	 */
2499 	if (xfer->wait_queue == NULL) {
2500 		xfer->wait_queue = pq;
2501 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2502 	}
2503 }
2504 
2505 /*------------------------------------------------------------------------*
2506  *	usbd_transfer_done
2507  *
2508  *  - This function is used to remove a USB transfer from the busdma,
2509  *  pipe or interrupt queue.
2510  *
2511  *  - This function is used to queue the USB transfer on the done
2512  *  queue.
2513  *
2514  *  - This function is used to stop any USB transfer timeouts.
2515  *------------------------------------------------------------------------*/
2516 void
2517 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2518 {
2519 	struct usb_xfer_root *info = xfer->xroot;
2520 
2521 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2522 
2523 	DPRINTF("err=%s\n", usbd_errstr(error));
2524 
2525 	/*
2526 	 * If we are not transferring then just return.
2527 	 * This can happen during transfer cancel.
2528 	 */
2529 	if (!xfer->flags_int.transferring) {
2530 		DPRINTF("not transferring\n");
2531 		/* end of control transfer, if any */
2532 		xfer->flags_int.control_act = 0;
2533 		return;
2534 	}
2535 	/* only set transfer error, if not already set */
2536 	if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2537 		xfer->error = error;
2538 
2539 	/* stop any callouts */
2540 	usb_callout_stop(&xfer->timeout_handle);
2541 
2542 	/*
2543 	 * If we are waiting on a queue, just remove the USB transfer
2544 	 * from the queue, if any. We should have the required locks
2545 	 * locked to do the remove when this function is called.
2546 	 */
2547 	usbd_transfer_dequeue(xfer);
2548 
2549 #if USB_HAVE_BUSDMA
2550 	if (mtx_owned(info->xfer_mtx)) {
2551 		struct usb_xfer_queue *pq;
2552 
2553 		/*
2554 		 * If the private USB lock is not locked, then we assume
2555 		 * that the BUS-DMA load stage has been passed:
2556 		 */
2557 		pq = &info->dma_q;
2558 
2559 		if (pq->curr == xfer) {
2560 			/* start the next BUS-DMA load, if any */
2561 			usb_command_wrapper(pq, NULL);
2562 		}
2563 	}
2564 #endif
2565 	/* keep some statistics */
2566 	if (xfer->error) {
2567 		info->bus->stats_err.uds_requests
2568 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2569 	} else {
2570 		info->bus->stats_ok.uds_requests
2571 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2572 	}
2573 
2574 	/* call the USB transfer callback */
2575 	usbd_callback_ss_done_defer(xfer);
2576 }
2577 
2578 /*------------------------------------------------------------------------*
2579  *	usbd_transfer_start_cb
2580  *
2581  * This function is called to start the USB transfer when
2582  * "xfer->interval" is greater than zero, and the endpoint type is
2583  * BULK or CONTROL.
2584  *------------------------------------------------------------------------*/
2585 static void
2586 usbd_transfer_start_cb(void *arg)
2587 {
2588 	struct usb_xfer *xfer = arg;
2589 	struct usb_endpoint *ep = xfer->endpoint;
2590 
2591 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2592 
2593 	DPRINTF("start\n");
2594 
2595 #if USB_HAVE_PF
2596 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2597 #endif
2598 
2599 	/* the transfer can now be cancelled */
2600 	xfer->flags_int.can_cancel_immed = 1;
2601 
2602 	/* start USB transfer, if no error */
2603 	if (xfer->error == 0)
2604 		(ep->methods->start) (xfer);
2605 
2606 	/* check for transfer error */
2607 	if (xfer->error) {
2608 		/* some error has happened */
2609 		usbd_transfer_done(xfer, 0);
2610 	}
2611 }
2612 
2613 /*------------------------------------------------------------------------*
2614  *	usbd_xfer_set_stall
2615  *
2616  * This function is used to set the stall flag outside the
2617  * callback. This function is NULL safe.
2618  *------------------------------------------------------------------------*/
2619 void
2620 usbd_xfer_set_stall(struct usb_xfer *xfer)
2621 {
2622 	if (xfer == NULL) {
2623 		/* tearing down */
2624 		return;
2625 	}
2626 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2627 
2628 	/* avoid any races by locking the USB mutex */
2629 	USB_BUS_LOCK(xfer->xroot->bus);
2630 	xfer->flags.stall_pipe = 1;
2631 	USB_BUS_UNLOCK(xfer->xroot->bus);
2632 }
2633 
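/*
 * Example (illustrative sketch; "tr_setup" is a hypothetical label
 * for the USB_ST_SETUP case): a common error-recovery pattern in host
 * side drivers is to request a stall clear and retry from the
 * callback's error state:
 *
 *	default:			// Error
 *		if (error != USB_ERR_CANCELLED) {
 *			// try to clear stall first
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */
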
2634 int
2635 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2636 {
2637 	return (xfer->endpoint->is_stalled);
2638 }
2639 
2640 /*------------------------------------------------------------------------*
2641  *	usbd_transfer_clear_stall
2642  *
2643  * This function is used to clear the stall flag outside the
2644  * callback. This function is NULL safe.
2645  *------------------------------------------------------------------------*/
2646 void
2647 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2648 {
2649 	if (xfer == NULL) {
2650 		/* tearing down */
2651 		return;
2652 	}
2653 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2654 
2655 	/* avoid any races by locking the USB mutex */
2656 	USB_BUS_LOCK(xfer->xroot->bus);
2657 
2658 	xfer->flags.stall_pipe = 0;
2659 
2660 	USB_BUS_UNLOCK(xfer->xroot->bus);
2661 }
2662 
2663 /*------------------------------------------------------------------------*
2664  *	usbd_pipe_start
2665  *
2666  * This function is used to add a USB transfer to the pipe transfer list.
2667  *------------------------------------------------------------------------*/
2668 void
2669 usbd_pipe_start(struct usb_xfer_queue *pq)
2670 {
2671 	struct usb_endpoint *ep;
2672 	struct usb_xfer *xfer;
2673 	uint8_t type;
2674 
2675 	xfer = pq->curr;
2676 	ep = xfer->endpoint;
2677 
2678 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2679 
2680 	/*
2681 	 * If the endpoint is already stalled, we do nothing!
2682 	 */
2683 	if (ep->is_stalled) {
2684 		return;
2685 	}
2686 	/*
2687 	 * Check if we are supposed to stall the endpoint:
2688 	 */
2689 	if (xfer->flags.stall_pipe) {
2690 		struct usb_device *udev;
2691 		struct usb_xfer_root *info;
2692 
2693 		/* clear stall command */
2694 		xfer->flags.stall_pipe = 0;
2695 
2696 		/* get pointer to USB device */
2697 		info = xfer->xroot;
2698 		udev = info->udev;
2699 
2700 		/*
2701 		 * Only stall BULK and INTERRUPT endpoints.
2702 		 */
2703 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2704 		if ((type == UE_BULK) ||
2705 		    (type == UE_INTERRUPT)) {
2706 			uint8_t did_stall;
2707 
2708 			did_stall = 1;
2709 
2710 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2711 				(udev->bus->methods->set_stall) (
2712 				    udev, ep, &did_stall);
2713 			} else if (udev->ctrl_xfer[1]) {
2714 				info = udev->ctrl_xfer[1]->xroot;
2715 				usb_proc_msignal(
2716 				    USB_BUS_CS_PROC(info->bus),
2717 				    &udev->cs_msg[0], &udev->cs_msg[1]);
2718 			} else {
2719 				/* should not happen */
2720 				DPRINTFN(0, "No stall handler\n");
2721 			}
2722 			/*
2723 			 * Check if we should stall. Some USB hardware
2724 			 * handles set- and clear-stall in hardware.
2725 			 */
2726 			if (did_stall) {
2727 				/*
2728 				 * The transfer will be continued when
2729 				 * the clear-stall control endpoint
2730 				 * message is received.
2731 				 */
2732 				ep->is_stalled = 1;
2733 				return;
2734 			}
2735 		} else if (type == UE_ISOCHRONOUS) {
2736 
2737 			/*
2738 			 * Make sure any FIFO overflow or other FIFO
2739 			 * error conditions go away by resetting the
2740 			 * endpoint FIFO through the clear stall
2741 			 * method.
2742 			 */
2743 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2744 				(udev->bus->methods->clear_stall) (udev, ep);
2745 			}
2746 		}
2747 	}
2748 	/* Set or clear stall complete - special case */
2749 	if (xfer->nframes == 0) {
2750 		/* we are complete */
2751 		xfer->aframes = 0;
2752 		usbd_transfer_done(xfer, 0);
2753 		return;
2754 	}
2755 	/*
2756 	 * Handled cases:
2757 	 *
2758 	 * 1) Start the first transfer queued.
2759 	 *
2760 	 * 2) Re-start the current USB transfer.
2761 	 */
2762 	/*
2763 	 * Check if there should be any
2764 	 * pre-transfer start delay:
2765 	 */
2766 	if (xfer->interval > 0) {
2767 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2768 		if ((type == UE_BULK) ||
2769 		    (type == UE_CONTROL)) {
2770 			usbd_transfer_timeout_ms(xfer,
2771 			    &usbd_transfer_start_cb,
2772 			    xfer->interval);
2773 			return;
2774 		}
2775 	}
2776 	DPRINTF("start\n");
2777 
2778 #if USB_HAVE_PF
2779 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2780 #endif
2781 	/* the transfer can now be cancelled */
2782 	xfer->flags_int.can_cancel_immed = 1;
2783 
2784 	/* start USB transfer, if no error */
2785 	if (xfer->error == 0)
2786 		(ep->methods->start) (xfer);
2787 
2788 	/* check for transfer error */
2789 	if (xfer->error) {
2790 		/* some error has happened */
2791 		usbd_transfer_done(xfer, 0);
2792 	}
2793 }
2794 
2795 /*------------------------------------------------------------------------*
2796  *	usbd_transfer_timeout_ms
2797  *
2798  * This function is used to set up a timeout on the given USB
2799  * transfer. If the timeout has been deferred, the callback given by
2800  * "cb" will get called after "ms" milliseconds.
2801  *------------------------------------------------------------------------*/
2802 void
2803 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2804     void (*cb) (void *arg), usb_timeout_t ms)
2805 {
2806 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2807 
2808 	/* defer delay */
2809 	usb_callout_reset(&xfer->timeout_handle,
2810 	    USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2811 }
2812 
2813 /*------------------------------------------------------------------------*
2814  *	usbd_callback_wrapper_sub
2815  *
2816  *  - This function will update variables in a USB transfer after
2817  *  the USB transfer is complete.
2818  *
2819  *  - This function is used to start the next USB transfer on the
2820  *  ep transfer queue, if any.
2821  *
2822  * NOTE: In some special cases the USB transfer will not be removed from
2823  * the pipe queue, but remain first. To enforce USB transfer removal call
2824  * this function passing the error code "USB_ERR_CANCELLED".
2825  *
2826  * Return values:
2827  * 0: Success.
2828  * Else: The callback has been deferred.
2829  *------------------------------------------------------------------------*/
2830 static uint8_t
2831 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2832 {
2833 	struct usb_endpoint *ep;
2834 	struct usb_bus *bus;
2835 	usb_frcount_t x;
2836 
2837 	bus = xfer->xroot->bus;
2838 
2839 	if ((!xfer->flags_int.open) &&
2840 	    (!xfer->flags_int.did_close)) {
2841 		DPRINTF("close\n");
2842 		USB_BUS_LOCK(bus);
2843 		(xfer->endpoint->methods->close) (xfer);
2844 		USB_BUS_UNLOCK(bus);
2845 		/* only close once */
2846 		xfer->flags_int.did_close = 1;
2847 		return (1);		/* wait for new callback */
2848 	}
2849 	/*
2850 	 * If we have a non-hardware-induced error, we
2851 	 * need to do the DMA delay!
2852 	 */
2853 	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2854 	    (xfer->error == USB_ERR_CANCELLED ||
2855 	    xfer->error == USB_ERR_TIMEOUT ||
2856 	    bus->methods->start_dma_delay != NULL)) {
2857 
2858 		usb_timeout_t temp;
2859 
2860 		/* only delay once */
2861 		xfer->flags_int.did_dma_delay = 1;
2862 
2863 		/* we cannot cancel this delay */
2864 		xfer->flags_int.can_cancel_immed = 0;
2865 
2866 		temp = usbd_get_dma_delay(xfer->xroot->udev);
2867 
2868 		DPRINTFN(3, "DMA delay, %u ms, "
2869 		    "on %p\n", temp, xfer);
2870 
2871 		if (temp != 0) {
2872 			USB_BUS_LOCK(bus);
2873 			/*
2874 			 * Some hardware solutions have dedicated
2875 			 * events when it is safe to free DMA'ed
2876 			 * memory. For the other hardware platforms we
2877 			 * use a static delay.
2878 			 */
2879 			if (bus->methods->start_dma_delay != NULL) {
2880 				(bus->methods->start_dma_delay) (xfer);
2881 			} else {
2882 				usbd_transfer_timeout_ms(xfer,
2883 				    (void (*)(void *))&usb_dma_delay_done_cb,
2884 				    temp);
2885 			}
2886 			USB_BUS_UNLOCK(bus);
2887 			return (1);	/* wait for new callback */
2888 		}
2889 	}
2890 	/* check actual number of frames */
2891 	if (xfer->aframes > xfer->nframes) {
2892 		if (xfer->error == 0) {
2893 			panic("%s: actual number of frames, %d, is "
2894 			    "greater than initial number of frames, %d\n",
2895 			    __FUNCTION__, xfer->aframes, xfer->nframes);
2896 		} else {
2897 			/* just set some valid value */
2898 			xfer->aframes = xfer->nframes;
2899 		}
2900 	}
2901 	/* compute actual length */
2902 	xfer->actlen = 0;
2903 
2904 	for (x = 0; x != xfer->aframes; x++) {
2905 		xfer->actlen += xfer->frlengths[x];
2906 	}
2907 
2908 	/*
2909 	 * Frames that were not transferred get zero actual length in
2910 	 * case the USB device driver does not check the actual number
2911 	 * of frames transferred, "xfer->aframes":
2912 	 */
2913 	for (; x < xfer->nframes; x++) {
2914 		usbd_xfer_set_frame_len(xfer, x, 0);
2915 	}
2916 
2917 	/* check actual length */
2918 	if (xfer->actlen > xfer->sumlen) {
2919 		if (xfer->error == 0) {
2920 			panic("%s: actual length, %d, is greater than "
2921 			    "initial length, %d\n",
2922 			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2923 		} else {
2924 			/* just set some valid value */
2925 			xfer->actlen = xfer->sumlen;
2926 		}
2927 	}
2928 	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2929 	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2930 	    xfer->aframes, xfer->nframes);
2931 
2932 	if (xfer->error) {
2933 		/* end of control transfer, if any */
2934 		xfer->flags_int.control_act = 0;
2935 
2936 #if USB_HAVE_TT_SUPPORT
2937 		switch (xfer->error) {
2938 		case USB_ERR_NORMAL_COMPLETION:
2939 		case USB_ERR_SHORT_XFER:
2940 		case USB_ERR_STALLED:
2941 		case USB_ERR_CANCELLED:
2942 			/* nothing to do */
2943 			break;
2944 		default:
2945 			/* try to reset the TT, if any */
2946 			USB_BUS_LOCK(bus);
2947 			uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2948 			USB_BUS_UNLOCK(bus);
2949 			break;
2950 		}
2951 #endif
2952 		/* check if we should block the execution queue */
2953 		if ((xfer->error != USB_ERR_CANCELLED) &&
2954 		    (xfer->flags.pipe_bof)) {
2955 			DPRINTFN(2, "xfer=%p: Block On Failure "
2956 			    "on endpoint=%p\n", xfer, xfer->endpoint);
2957 			goto done;
2958 		}
2959 	} else {
2960 		/* check for short transfers */
2961 		if (xfer->actlen < xfer->sumlen) {
2962 
2963 			/* end of control transfer, if any */
2964 			xfer->flags_int.control_act = 0;
2965 
2966 			if (!xfer->flags_int.short_xfer_ok) {
2967 				xfer->error = USB_ERR_SHORT_XFER;
2968 				if (xfer->flags.pipe_bof) {
2969 					DPRINTFN(2, "xfer=%p: Block On Failure on "
2970 					    "Short Transfer on endpoint %p.\n",
2971 					    xfer, xfer->endpoint);
2972 					goto done;
2973 				}
2974 			}
2975 		} else {
2976 			/*
2977 			 * Check if we are in the middle of a
2978 			 * control transfer:
2979 			 */
2980 			if (xfer->flags_int.control_act) {
2981 				DPRINTFN(5, "xfer=%p: Control transfer "
2982 				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2983 				goto done;
2984 			}
2985 		}
2986 	}
2987 
2988 	ep = xfer->endpoint;
2989 
2990 	/*
2991 	 * If the current USB transfer is completing we need to start the
2992 	 * next one:
2993 	 */
2994 	USB_BUS_LOCK(bus);
2995 	if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2996 		usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2997 
2998 		if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
2999 		    TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3000 			/* there is another USB transfer waiting */
3001 		} else {
3002 			/* this is the last USB transfer */
3003 			/* clear isochronous sync flag */
3004 			xfer->endpoint->is_synced = 0;
3005 		}
3006 	}
3007 	USB_BUS_UNLOCK(bus);
3008 done:
3009 	return (0);
3010 }
3011 
3012 /*------------------------------------------------------------------------*
3013  *	usb_command_wrapper
3014  *
3015  * This function is used to execute commands non-recursively on a USB
3016  * transfer.
3017  *------------------------------------------------------------------------*/
3018 void
3019 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3020 {
3021 	if (xfer) {
3022 		/*
3023 		 * If the transfer is not already processing,
3024 		 * queue it!
3025 		 */
3026 		if (pq->curr != xfer) {
3027 			usbd_transfer_enqueue(pq, xfer);
3028 			if (pq->curr != NULL) {
3029 				/* something is already processing */
3030 				DPRINTFN(6, "busy %p\n", pq->curr);
3031 				return;
3032 			}
3033 		}
3034 	} else {
3035 		/* Get next element in queue */
3036 		pq->curr = NULL;
3037 	}
3038 
3039 	if (!pq->recurse_1) {
3040 
3041 		/* clear third recurse flag */
3042 		pq->recurse_3 = 0;
3043 
3044 		do {
3045 			/* set two first recurse flags */
3046 			pq->recurse_1 = 1;
3047 			pq->recurse_2 = 1;
3048 
3049 			if (pq->curr == NULL) {
3050 				xfer = TAILQ_FIRST(&pq->head);
3051 				if (xfer) {
3052 					TAILQ_REMOVE(&pq->head, xfer,
3053 					    wait_entry);
3054 					xfer->wait_queue = NULL;
3055 					pq->curr = xfer;
3056 				} else {
3057 					break;
3058 				}
3059 			}
3060 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3061 			(pq->command) (pq);
3062 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3063 
3064 			/*
3065 			 * Set third recurse flag to indicate
3066 			 * recursion happened:
3067 			 */
3068 			pq->recurse_3 = 1;
3069 
3070 		} while (!pq->recurse_2);
3071 
3072 		/* clear first recurse flag */
3073 		pq->recurse_1 = 0;
3074 
3075 	} else {
3076 		/* clear second recurse flag */
3077 		pq->recurse_2 = 0;
3078 	}
3079 }
3080 
3081 /*------------------------------------------------------------------------*
3082  *	usbd_ctrl_transfer_setup
3083  *
3084  * This function is used to set up the default USB control endpoint
3085  * transfer.
3086  *------------------------------------------------------------------------*/
3087 void
3088 usbd_ctrl_transfer_setup(struct usb_device *udev)
3089 {
3090 	struct usb_xfer *xfer;
3091 	uint8_t no_resetup;
3092 	uint8_t iface_index;
3093 
3094 	/* check for root HUB */
3095 	if (udev->parent_hub == NULL)
3096 		return;
3097 repeat:
3098 
3099 	xfer = udev->ctrl_xfer[0];
3100 	if (xfer) {
3101 		USB_XFER_LOCK(xfer);
3102 		no_resetup =
3103 		    ((xfer->address == udev->address) &&
3104 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3105 		    udev->ddesc.bMaxPacketSize));
3106 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3107 			if (no_resetup) {
3108 				/*
3109 				 * NOTE: checking "xfer->address" and
3110 				 * starting the USB transfer must be
3111 				 * atomic!
3112 				 */
3113 				usbd_transfer_start(xfer);
3114 			}
3115 		}
3116 		USB_XFER_UNLOCK(xfer);
3117 	} else {
3118 		no_resetup = 0;
3119 	}
3120 
3121 	if (no_resetup) {
3122 		/*
3123 	         * All parameters are exactly the same as before.
3124 	         * Just return.
3125 	         */
3126 		return;
3127 	}
3128 	/*
3129 	 * Update wMaxPacketSize for the default control endpoint:
3130 	 */
3131 	udev->ctrl_ep_desc.wMaxPacketSize[0] =
3132 	    udev->ddesc.bMaxPacketSize;
3133 
3134 	/*
3135 	 * Unsetup any existing USB transfer:
3136 	 */
3137 	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3138 
3139 	/*
3140 	 * Reset clear stall error counter.
3141 	 */
3142 	udev->clear_stall_errors = 0;
3143 
3144 	/*
3145 	 * Try to setup a new USB transfer for the
3146 	 * default control endpoint:
3147 	 */
3148 	iface_index = 0;
3149 	if (usbd_transfer_setup(udev, &iface_index,
3150 	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3151 	    &udev->device_mtx)) {
3152 		DPRINTFN(0, "could not setup default "
3153 		    "USB transfer\n");
3154 	} else {
3155 		goto repeat;
3156 	}
3157 }
3158 
3159 /*------------------------------------------------------------------------*
3160  *	usbd_clear_stall_locked - factored out code
3161  *
3162  * NOTE: the intention of this function is not to reset the hardware
3163  * data toggle.
3164  *------------------------------------------------------------------------*/
3165 void
3166 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3167 {
3168 	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3169 
3170 	/* check that we have a valid case */
3171 	if (udev->flags.usb_mode == USB_MODE_HOST &&
3172 	    udev->parent_hub != NULL &&
3173 	    udev->bus->methods->clear_stall != NULL &&
3174 	    ep->methods != NULL) {
3175 		(udev->bus->methods->clear_stall) (udev, ep);
3176 	}
3177 }
3178 
3179 /*------------------------------------------------------------------------*
3180  *	usbd_clear_data_toggle - factored out code
3181  *
3182  * NOTE: the intention of this function is not to reset the hardware
3183  * data toggle on the USB device side.
3184  *------------------------------------------------------------------------*/
3185 void
3186 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3187 {
3188 	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3189 
3190 	USB_BUS_LOCK(udev->bus);
3191 	ep->toggle_next = 0;
3192 	/* some hardware needs a callback to clear the data toggle */
3193 	usbd_clear_stall_locked(udev, ep);
3194 	USB_BUS_UNLOCK(udev->bus);
3195 }
3196 
3197 /*------------------------------------------------------------------------*
3198  *	usbd_clear_stall_callback - factored out clear stall callback
3199  *
3200  * Input parameters:
3201  *  xfer1: Clear Stall Control Transfer
3202  *  xfer2: Stalled USB Transfer
3203  *
3204  * This function is NULL safe.
3205  *
3206  * Return values:
3207  *   0: In progress
3208  *   Else: Finished
3209  *
3210  * Clear stall config example:
3211  *
3212  * static const struct usb_config my_clearstall =  {
3213  *	.type = UE_CONTROL,
3214  *	.endpoint = 0,
3215  *	.direction = UE_DIR_ANY,
3216  *	.interval = 50, //50 milliseconds
3217  *	.bufsize = sizeof(struct usb_device_request),
3218  *	.timeout = 1000, //1.000 seconds
3219  *	.callback = &my_clear_stall_callback, // **
3220  *	.usb_mode = USB_MODE_HOST,
3221  * };
3222  *
3223  * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3224  * passing the correct parameters.
3225  *------------------------------------------------------------------------*/
3226 uint8_t
3227 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3228     struct usb_xfer *xfer2)
3229 {
3230 	struct usb_device_request req;
3231 
3232 	if (xfer2 == NULL) {
3233 		/* looks like we are tearing down */
3234 		DPRINTF("NULL input parameter\n");
3235 		return (0);
3236 	}
3237 	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3238 	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3239 
3240 	switch (USB_GET_STATE(xfer1)) {
3241 	case USB_ST_SETUP:
3242 
3243 		/*
3244 		 * pre-clear the data toggle to DATA0 ("umass.c" and
3245 		 * "ata-usb.c" depends on this)
3246 		 * "ata-usb.c" depend on this)
3247 
3248 		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3249 
3250 		/* setup a clear-stall packet */
3251 
3252 		req.bmRequestType = UT_WRITE_ENDPOINT;
3253 		req.bRequest = UR_CLEAR_FEATURE;
3254 		USETW(req.wValue, UF_ENDPOINT_HALT);
3255 		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3256 		req.wIndex[1] = 0;
3257 		USETW(req.wLength, 0);
3258 
3259 		/*
3260 		 * "usbd_transfer_setup_sub()" will ensure that
3261 		 * we have sufficient room in the buffer for
3262 		 * the request structure!
3263 		 */
3264 
3265 		/* copy in the transfer */
3266 
3267 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3268 
3269 		/* set length */
3270 		xfer1->frlengths[0] = sizeof(req);
3271 		xfer1->nframes = 1;
3272 
3273 		usbd_transfer_submit(xfer1);
3274 		return (0);
3275 
3276 	case USB_ST_TRANSFERRED:
3277 		break;
3278 
3279 	default:			/* Error */
3280 		if (xfer1->error == USB_ERR_CANCELLED) {
3281 			return (0);
3282 		}
3283 		break;
3284 	}
3285 	return (1);			/* Clear Stall Finished */
3286 }
3287 
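/*
 * Example (illustrative sketch; "my_clear_stall_callback", the "sc"
 * softc and its fields are hypothetical): the callback referenced in
 * the configuration above usually looks like this:
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *		struct usb_xfer *xfer_other = sc->sc_xfer[MY_BULK_RD_XFER];
 *
 *		if (usbd_clear_stall_callback(xfer, xfer_other)) {
 *			DPRINTF("stall cleared\n");
 *			sc->sc_flags &= ~MY_FLAG_STALLED;
 *			usbd_transfer_start(xfer_other);
 *		}
 *	}
 */
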
3288 /*------------------------------------------------------------------------*
3289  *	usbd_transfer_poll
3290  *
3291  * The following function gets called from the USB keyboard driver and
3292  * UMASS when the system has panicked.
3293  *
3294  * NOTE: It is currently not possible to resume normal operation on
3295  * the USB controller which has been polled, due to clearing of the
3296  * "up_dsleep" and "up_msleep" flags.
3297  *------------------------------------------------------------------------*/
3298 void
3299 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3300 {
3301 	struct usb_xfer *xfer;
3302 	struct usb_xfer_root *xroot;
3303 	struct usb_device *udev;
3304 	struct usb_proc_msg *pm;
3305 	struct usb_bus *bus;
3306 	uint16_t n;
3307 	uint16_t drop_bus_spin;
3308 	uint16_t drop_bus;
3309 	uint16_t drop_xfer;
3310 
3311 	for (n = 0; n != max; n++) {
3312 		/* Extra checks to avoid panic */
3313 		xfer = ppxfer[n];
3314 		if (xfer == NULL)
3315 			continue;	/* no USB transfer */
3316 		xroot = xfer->xroot;
3317 		if (xroot == NULL)
3318 			continue;	/* no USB root */
3319 		udev = xroot->udev;
3320 		if (udev == NULL)
3321 			continue;	/* no USB device */
3322 		bus = udev->bus;
3323 		if (bus == NULL)
3324 			continue;	/* no BUS structure */
3325 		if (bus->methods == NULL)
3326 			continue;	/* no BUS methods */
3327 		if (bus->methods->xfer_poll == NULL)
3328 			continue;	/* no poll method */
3329 
3330 		drop_bus_spin = 0;
3331 		drop_bus = 0;
3332 		drop_xfer = 0;
3333 
3334 		if (USB_IN_POLLING_MODE_FUNC() == 0) {
3335 			/* make sure that the BUS spin mutex is not locked */
3336 			while (mtx_owned(&bus->bus_spin_lock)) {
3337 				mtx_unlock_spin(&bus->bus_spin_lock);
3338 				drop_bus_spin++;
3339 			}
3340 
3341 			/* make sure that the BUS mutex is not locked */
3342 			while (mtx_owned(&bus->bus_mtx)) {
3343 				mtx_unlock(&bus->bus_mtx);
3344 				drop_bus++;
3345 			}
3346 
3347 			/* make sure that the transfer mutex is not locked */
3348 			while (mtx_owned(xroot->xfer_mtx)) {
3349 				mtx_unlock(xroot->xfer_mtx);
3350 				drop_xfer++;
3351 			}
3352 		}
3353 
3354 		/* Make sure cv_signal() and cv_broadcast() are not called */
3355 		USB_BUS_CONTROL_XFER_PROC(bus)->up_msleep = 0;
3356 		USB_BUS_EXPLORE_PROC(bus)->up_msleep = 0;
3357 		USB_BUS_GIANT_PROC(bus)->up_msleep = 0;
3358 		USB_BUS_NON_GIANT_ISOC_PROC(bus)->up_msleep = 0;
3359 		USB_BUS_NON_GIANT_BULK_PROC(bus)->up_msleep = 0;
3360 
3361 		/* poll USB hardware */
3362 		(bus->methods->xfer_poll) (bus);
3363 
3364 		USB_BUS_LOCK(xroot->bus);
3365 
3366 		/* check for clear stall */
3367 		if (udev->ctrl_xfer[1] != NULL) {
3368 
3369 			/* poll clear stall start */
3370 			pm = &udev->cs_msg[0].hdr;
3371 			(pm->pm_callback) (pm);
3372 			/* poll clear stall done thread */
3373 			pm = &udev->ctrl_xfer[1]->
3374 			    xroot->done_m[0].hdr;
3375 			(pm->pm_callback) (pm);
3376 		}
3377 
3378 		/* poll done thread */
3379 		pm = &xroot->done_m[0].hdr;
3380 		(pm->pm_callback) (pm);
3381 
3382 		USB_BUS_UNLOCK(xroot->bus);
3383 
3384 		/* restore transfer mutex */
3385 		while (drop_xfer--)
3386 			mtx_lock(xroot->xfer_mtx);
3387 
3388 		/* restore BUS mutex */
3389 		while (drop_bus--)
3390 			mtx_lock(&bus->bus_mtx);
3391 
3392 		/* restore BUS spin mutex */
3393 		while (drop_bus_spin--)
3394 			mtx_lock_spin(&bus->bus_spin_lock);
3395 	}
3396 }
3397 
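/*
 * Example (illustrative sketch; "sc" and MY_N_TRANSFER are
 * hypothetical): a console-capable driver polls its whole transfer
 * array from its polling routine after a panic:
 *
 *	usbd_transfer_poll(sc->sc_xfer, MY_N_TRANSFER);
 */
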
3398 static void
3399 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3400     uint8_t type, enum usb_dev_speed speed)
3401 {
3402 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3403 		[USB_SPEED_LOW] = 8,
3404 		[USB_SPEED_FULL] = 64,
3405 		[USB_SPEED_HIGH] = 1024,
3406 		[USB_SPEED_VARIABLE] = 1024,
3407 		[USB_SPEED_SUPER] = 1024,
3408 	};
3409 
3410 	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3411 		[USB_SPEED_LOW] = 0,	/* invalid */
3412 		[USB_SPEED_FULL] = 1023,
3413 		[USB_SPEED_HIGH] = 1024,
3414 		[USB_SPEED_VARIABLE] = 3584,
3415 		[USB_SPEED_SUPER] = 1024,
3416 	};
3417 
3418 	static const uint16_t control_min[USB_SPEED_MAX] = {
3419 		[USB_SPEED_LOW] = 8,
3420 		[USB_SPEED_FULL] = 8,
3421 		[USB_SPEED_HIGH] = 64,
3422 		[USB_SPEED_VARIABLE] = 512,
3423 		[USB_SPEED_SUPER] = 512,
3424 	};
3425 
3426 	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3427 		[USB_SPEED_LOW] = 8,
3428 		[USB_SPEED_FULL] = 8,
3429 		[USB_SPEED_HIGH] = 512,
3430 		[USB_SPEED_VARIABLE] = 512,
3431 		[USB_SPEED_SUPER] = 1024,
3432 	};
3433 
3434 	uint16_t temp;
3435 
3436 	memset(ptr, 0, sizeof(*ptr));
3437 
3438 	switch (type) {
3439 	case UE_INTERRUPT:
3440 		ptr->range.max = intr_range_max[speed];
3441 		break;
3442 	case UE_ISOCHRONOUS:
3443 		ptr->range.max = isoc_range_max[speed];
3444 		break;
3445 	default:
3446 		if (type == UE_BULK)
3447 			temp = bulk_min[speed];
3448 		else /* UE_CONTROL */
3449 			temp = control_min[speed];
3450 
3451 		/* default is fixed */
3452 		ptr->fixed[0] = temp;
3453 		ptr->fixed[1] = temp;
3454 		ptr->fixed[2] = temp;
3455 		ptr->fixed[3] = temp;
3456 
3457 		if (speed == USB_SPEED_FULL) {
3458 			/* multiple sizes */
3459 			ptr->fixed[1] = 16;
3460 			ptr->fixed[2] = 32;
3461 			ptr->fixed[3] = 64;
3462 		}
3463 		if ((speed == USB_SPEED_VARIABLE) &&
3464 		    (type == UE_BULK)) {
3465 			/* multiple sizes */
3466 			ptr->fixed[2] = 1024;
3467 			ptr->fixed[3] = 1536;
3468 		}
3469 		break;
3470 	}
3471 }
3472 
3473 void	*
3474 usbd_xfer_softc(struct usb_xfer *xfer)
3475 {
3476 	return (xfer->priv_sc);
3477 }
3478 
3479 void *
3480 usbd_xfer_get_priv(struct usb_xfer *xfer)
3481 {
3482 	return (xfer->priv_fifo);
3483 }
3484 
3485 void
3486 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3487 {
3488 	xfer->priv_fifo = ptr;
3489 }
3490 
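/*
 * Example (illustrative sketch; "my_softc", "my_req" and "next_req"
 * are hypothetical): "usbd_xfer_softc()" returns the "priv_sc"
 * pointer given to "usbd_transfer_setup()", while the private pointer
 * can carry per-submission state set from the callback:
 *
 *	struct my_softc *sc = usbd_xfer_softc(xfer);
 *	struct my_req *req = usbd_xfer_get_priv(xfer);
 *
 *	usbd_xfer_set_priv(xfer, next_req);
 */
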
3491 uint8_t
3492 usbd_xfer_state(struct usb_xfer *xfer)
3493 {
3494 	return (xfer->usb_state);
3495 }
3496 
3497 void
3498 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3499 {
3500 	switch (flag) {
3501 		case USB_FORCE_SHORT_XFER:
3502 			xfer->flags.force_short_xfer = 1;
3503 			break;
3504 		case USB_SHORT_XFER_OK:
3505 			xfer->flags.short_xfer_ok = 1;
3506 			break;
3507 		case USB_MULTI_SHORT_OK:
3508 			xfer->flags.short_frames_ok = 1;
3509 			break;
3510 		case USB_MANUAL_STATUS:
3511 			xfer->flags.manual_status = 1;
3512 			break;
3513 	}
3514 }
3515 
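/*
 * Example (illustrative sketch): flags can be changed at runtime from
 * the callback's setup state, e.g. to accept a short read on a
 * bulk IN transfer before submitting it:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 *		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *		usbd_transfer_submit(xfer);
 *		break;
 */
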
3516 void
3517 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3518 {
3519 	switch (flag) {
3520 		case USB_FORCE_SHORT_XFER:
3521 			xfer->flags.force_short_xfer = 0;
3522 			break;
3523 		case USB_SHORT_XFER_OK:
3524 			xfer->flags.short_xfer_ok = 0;
3525 			break;
3526 		case USB_MULTI_SHORT_OK:
3527 			xfer->flags.short_frames_ok = 0;
3528 			break;
3529 		case USB_MANUAL_STATUS:
3530 			xfer->flags.manual_status = 0;
3531 			break;
3532 	}
3533 }
3534 
3535 /*
3536  * The following function returns the time, in milliseconds, when the
3537  * isochronous transfer was completed by the hardware. The returned
3538  * value wraps around every 65536 milliseconds.
3539  */
3540 uint16_t
3541 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3542 {
3543 	return (xfer->isoc_time_complete);
3544 }
3545 
3546 /*
3547  * The following function returns non-zero if the max packet size
3548  * field was clamped to a valid value. Otherwise it returns zero.
3549  */
3550 uint8_t
3551 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3552 {
3553 	return (xfer->flags_int.maxp_was_clamped);
3554 }
3555