xref: /freebsd/sys/dev/usb/usb_transfer.c (revision 3c5ba95ad12285ad37c182a4bfc1b240ec6d18a7)
1 /* $FreeBSD$ */
2 /*-
3  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4  *
5  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #ifdef USB_GLOBAL_INCLUDE_FILE
30 #include USB_GLOBAL_INCLUDE_FILE
31 #else
32 #include <sys/stdint.h>
33 #include <sys/stddef.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/module.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/condvar.h>
44 #include <sys/sysctl.h>
45 #include <sys/sx.h>
46 #include <sys/unistd.h>
47 #include <sys/callout.h>
48 #include <sys/malloc.h>
49 #include <sys/priv.h>
50 
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdi_util.h>
54 
55 #define	USB_DEBUG_VAR usb_debug
56 
57 #include <dev/usb/usb_core.h>
58 #include <dev/usb/usb_busdma.h>
59 #include <dev/usb/usb_process.h>
60 #include <dev/usb/usb_transfer.h>
61 #include <dev/usb/usb_device.h>
62 #include <dev/usb/usb_debug.h>
63 #include <dev/usb/usb_util.h>
64 
65 #include <dev/usb/usb_controller.h>
66 #include <dev/usb/usb_bus.h>
67 #include <dev/usb/usb_pf.h>
68 #endif			/* USB_GLOBAL_INCLUDE_FILE */
69 
70 struct usb_std_packet_size {
71 	struct {
72 		uint16_t min;		/* inclusive */
73 		uint16_t max;		/* inclusive */
74 	}	range;
75 
76 	uint16_t fixed[4];
77 };
78 
79 static usb_callback_t usb_request_callback;
80 
81 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
82 
83 	/* This transfer is used for generic control endpoint transfers */
84 
85 	[0] = {
86 		.type = UE_CONTROL,
87 		.endpoint = 0x00,	/* Control endpoint */
88 		.direction = UE_DIR_ANY,
89 		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
90 		.flags = {.proxy_buffer = 1,},
91 		.callback = &usb_request_callback,
92 		.usb_mode = USB_MODE_DUAL,	/* both modes */
93 	},
94 
95 	/* This transfer is used for generic clear stall only */
96 
97 	[1] = {
98 		.type = UE_CONTROL,
99 		.endpoint = 0x00,	/* Control pipe */
100 		.direction = UE_DIR_ANY,
101 		.bufsize = sizeof(struct usb_device_request),
102 		.callback = &usb_do_clear_stall_callback,
103 		.timeout = 1000,	/* 1 second */
104 		.interval = 50,	/* 50ms */
105 		.usb_mode = USB_MODE_HOST,
106 	},
107 };
108 
109 /* function prototypes */
110 
111 static void	usbd_update_max_frame_size(struct usb_xfer *);
112 static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
113 static void	usbd_control_transfer_init(struct usb_xfer *);
114 static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
115 static void	usb_callback_proc(struct usb_proc_msg *);
116 static void	usbd_callback_ss_done_defer(struct usb_xfer *);
117 static void	usbd_callback_wrapper(struct usb_xfer_queue *);
118 static void	usbd_transfer_start_cb(void *);
119 static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
120 static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
121 		    uint8_t type, enum usb_dev_speed speed);
122 
123 /*------------------------------------------------------------------------*
124  *	usb_request_callback
125  *------------------------------------------------------------------------*/
126 static void
127 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
128 {
129 	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
130 		usb_handle_request_callback(xfer, error);
131 	else
132 		usbd_do_request_callback(xfer, error);
133 }
134 
135 /*------------------------------------------------------------------------*
136  *	usbd_update_max_frame_size
137  *
138  * This function updates the maximum frame size, because high speed USB
139  * can transfer multiple consecutive packets.
140  *------------------------------------------------------------------------*/
141 static void
142 usbd_update_max_frame_size(struct usb_xfer *xfer)
143 {
144 	/* compute maximum frame size */
145 	/* this computation should not overflow 16-bit */
146 	/* max = 15 * 1024 */
147 
148 	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
149 }
150 
151 /*------------------------------------------------------------------------*
152  *	usbd_get_dma_delay
153  *
154  * The following function is called when we need to
155  * synchronize with DMA hardware.
156  *
157  * Returns:
158  *    0: no DMA delay required
159  * Else: milliseconds of DMA delay
160  *------------------------------------------------------------------------*/
161 usb_timeout_t
162 usbd_get_dma_delay(struct usb_device *udev)
163 {
164 	const struct usb_bus_methods *mtod;
165 	uint32_t temp;
166 
167 	mtod = udev->bus->methods;
168 	temp = 0;
169 
170 	if (mtod->get_dma_delay) {
171 		(mtod->get_dma_delay) (udev, &temp);
172 		/*
173 		 * Round up and convert to milliseconds. Note that we use
174 		 * 1024 microseconds per millisecond to save a division.
175 		 */
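		/*
		 * For example, a DMA delay of 1500 microseconds becomes
		 * (1500 + 1023) / 1024 = 2 milliseconds.
		 */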
176 		temp += 0x3FF;
177 		temp /= 0x400;
178 	}
179 	return (temp);
180 }
181 
182 /*------------------------------------------------------------------------*
183  *	usbd_transfer_setup_sub_malloc
184  *
185  * This function will allocate one or more DMA'able memory chunks
186  * according to "size", "align" and "count" arguments. "ppc" is
187  * pointed to a linear array of USB page caches afterwards.
188  *
189  * If the "align" argument is equal to "1" a non-contiguous allocation
190  * can happen. Else if the "align" argument is greater than "1", the
191  * allocation will always be contiguous in memory.
192  *
193  * Returns:
194  *    0: Success
195  * Else: Failure
196  *------------------------------------------------------------------------*/
197 #if USB_HAVE_BUSDMA
198 uint8_t
199 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
200     struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
201     usb_size_t count)
202 {
203 	struct usb_page_cache *pc;
204 	struct usb_page *pg;
205 	void *buf;
206 	usb_size_t n_dma_pc;
207 	usb_size_t n_dma_pg;
208 	usb_size_t n_obj;
209 	usb_size_t x;
210 	usb_size_t y;
211 	usb_size_t r;
212 	usb_size_t z;
213 
214 	USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
215 	    align));
216 	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
217 
218 	if (count == 0) {
219 		return (0);		/* nothing to allocate */
220 	}
221 	/*
222 	 * Make sure that the size is aligned properly.
223 	 */
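	/*
	 * The expression below rounds "size" up to the next multiple
	 * of "align", assuming that "align" is a power of two.
	 */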
224 	size = -((-size) & (-align));
225 
226 	/*
227 	 * Try multi-allocation chunks to reduce the number of DMA
228 	 * allocations, because DMA allocations are slow.
229 	 */
230 	if (align == 1) {
231 		/* special case - non-cached multi page DMA memory */
232 		n_dma_pc = count;
233 		n_dma_pg = (2 + (size / USB_PAGE_SIZE));
234 		n_obj = 1;
235 	} else if (size >= USB_PAGE_SIZE) {
236 		n_dma_pc = count;
237 		n_dma_pg = 1;
238 		n_obj = 1;
239 	} else {
240 		/* compute number of objects per page */
241 #ifdef USB_DMA_SINGLE_ALLOC
242 		n_obj = 1;
243 #else
244 		n_obj = (USB_PAGE_SIZE / size);
245 #endif
246 		/*
247 		 * Compute number of DMA chunks, rounded up
248 		 * to nearest one:
249 		 */
250 		n_dma_pc = howmany(count, n_obj);
251 		n_dma_pg = 1;
252 	}
253 
254 	/*
255 	 * DMA memory is allocated once, but mapped twice. That's why
256 	 * there is one list for auto-free and another list for
257 	 * non-auto-free which only holds the mapping and not the
258 	 * allocation.
259 	 */
260 	if (parm->buf == NULL) {
261 		/* reserve memory (auto-free) */
262 		parm->dma_page_ptr += n_dma_pc * n_dma_pg;
263 		parm->dma_page_cache_ptr += n_dma_pc;
264 
265 		/* reserve memory (no-auto-free) */
266 		parm->dma_page_ptr += count * n_dma_pg;
267 		parm->xfer_page_cache_ptr += count;
268 		return (0);
269 	}
270 	for (x = 0; x != n_dma_pc; x++) {
271 		/* need to initialize the page cache */
272 		parm->dma_page_cache_ptr[x].tag_parent =
273 		    &parm->curr_xfer->xroot->dma_parent_tag;
274 	}
275 	for (x = 0; x != count; x++) {
276 		/* need to initialize the page cache */
277 		parm->xfer_page_cache_ptr[x].tag_parent =
278 		    &parm->curr_xfer->xroot->dma_parent_tag;
279 	}
280 
281 	if (ppc != NULL) {
282 		if (n_obj != 1)
283 			*ppc = parm->xfer_page_cache_ptr;
284 		else
285 			*ppc = parm->dma_page_cache_ptr;
286 	}
287 	r = count;			/* set remainder count */
288 	z = n_obj * size;		/* set allocation size */
289 	pc = parm->xfer_page_cache_ptr;
290 	pg = parm->dma_page_ptr;
291 
292 	if (n_obj == 1) {
293 	    /*
294 	     * Avoid mapping memory twice if only a single object
295 	     * should be allocated per page cache:
296 	     */
297 	    for (x = 0; x != n_dma_pc; x++) {
298 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
299 		    pg, z, align)) {
300 			return (1);	/* failure */
301 		}
302 		/* Make room for one DMA page cache and "n_dma_pg" pages */
303 		parm->dma_page_cache_ptr++;
304 		pg += n_dma_pg;
305 	    }
306 	} else {
307 	    for (x = 0; x != n_dma_pc; x++) {
308 
309 		if (r < n_obj) {
310 			/* compute last remainder */
311 			z = r * size;
312 			n_obj = r;
313 		}
314 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
315 		    pg, z, align)) {
316 			return (1);	/* failure */
317 		}
318 		/* Set beginning of current buffer */
319 		buf = parm->dma_page_cache_ptr->buffer;
320 		/* Make room for one DMA page cache and "n_dma_pg" pages */
321 		parm->dma_page_cache_ptr++;
322 		pg += n_dma_pg;
323 
324 		for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
325 
326 			/* Load sub-chunk into DMA */
327 			if (usb_pc_dmamap_create(pc, size)) {
328 				return (1);	/* failure */
329 			}
330 			pc->buffer = USB_ADD_BYTES(buf, y * size);
331 			pc->page_start = pg;
332 
333 			USB_MTX_LOCK(pc->tag_parent->mtx);
334 			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
335 				USB_MTX_UNLOCK(pc->tag_parent->mtx);
336 				return (1);	/* failure */
337 			}
338 			USB_MTX_UNLOCK(pc->tag_parent->mtx);
339 		}
340 	    }
341 	}
342 
343 	parm->xfer_page_cache_ptr = pc;
344 	parm->dma_page_ptr = pg;
345 	return (0);
346 }
347 #endif
348 
349 /*------------------------------------------------------------------------*
350  *	usbd_transfer_setup_sub - transfer setup subroutine
351  *
352  * This function must be called from the "xfer_setup" callback of the
353  * USB Host or Device controller driver when setting up a USB
354  * transfer. This function will set up the correct packet sizes, buffer
355  * sizes, flags and more, which are stored in the "usb_xfer"
356  * structure.
357  *------------------------------------------------------------------------*/
358 void
359 usbd_transfer_setup_sub(struct usb_setup_params *parm)
360 {
361 	enum {
362 		REQ_SIZE = 8,
363 		MIN_PKT = 8,
364 	};
365 	struct usb_xfer *xfer = parm->curr_xfer;
366 	const struct usb_config *setup = parm->curr_setup;
367 	struct usb_endpoint_ss_comp_descriptor *ecomp;
368 	struct usb_endpoint_descriptor *edesc;
369 	struct usb_std_packet_size std_size;
370 	usb_frcount_t n_frlengths;
371 	usb_frcount_t n_frbuffers;
372 	usb_frcount_t x;
373 	uint16_t maxp_old;
374 	uint8_t type;
375 	uint8_t zmps;
376 
377 	/*
378 	 * Sanity check. The following parameters must be initialized before
379 	 * calling this function.
380 	 */
381 	if ((parm->hc_max_packet_size == 0) ||
382 	    (parm->hc_max_packet_count == 0) ||
383 	    (parm->hc_max_frame_size == 0)) {
384 		parm->err = USB_ERR_INVAL;
385 		goto done;
386 	}
387 	edesc = xfer->endpoint->edesc;
388 	ecomp = xfer->endpoint->ecomp;
389 
390 	type = (edesc->bmAttributes & UE_XFERTYPE);
391 
392 	xfer->flags = setup->flags;
393 	xfer->nframes = setup->frames;
394 	xfer->timeout = setup->timeout;
395 	xfer->callback = setup->callback;
396 	xfer->interval = setup->interval;
397 	xfer->endpointno = edesc->bEndpointAddress;
398 	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
399 	xfer->max_packet_count = 1;
400 	/* make a shadow copy: */
401 	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
402 
403 	parm->bufsize = setup->bufsize;
404 
405 	switch (parm->speed) {
406 	case USB_SPEED_HIGH:
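		/*
		 * For high speed endpoints, bits 10..0 of wMaxPacketSize
		 * hold the packet size, while bits 12..11 hold the number
		 * of additional transactions per microframe used by high
		 * bandwidth interrupt and isochronous endpoints.
		 */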
407 		switch (type) {
408 		case UE_ISOCHRONOUS:
409 		case UE_INTERRUPT:
410 			xfer->max_packet_count +=
411 			    (xfer->max_packet_size >> 11) & 3;
412 
413 			/* check for invalid max packet count */
414 			if (xfer->max_packet_count > 3)
415 				xfer->max_packet_count = 3;
416 			break;
417 		default:
418 			break;
419 		}
420 		xfer->max_packet_size &= 0x7FF;
421 		break;
422 	case USB_SPEED_SUPER:
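		/*
		 * For SuperSpeed endpoints the packet count is derived
		 * from the endpoint companion descriptor: "bMaxBurst"
		 * gives the number of additional packets per burst, and
		 * for isochronous endpoints the Mult field scales the
		 * burst count further.
		 */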
423 		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
424 
425 		if (ecomp != NULL)
426 			xfer->max_packet_count += ecomp->bMaxBurst;
427 
428 		if ((xfer->max_packet_count == 0) ||
429 		    (xfer->max_packet_count > 16))
430 			xfer->max_packet_count = 16;
431 
432 		switch (type) {
433 		case UE_CONTROL:
434 			xfer->max_packet_count = 1;
435 			break;
436 		case UE_ISOCHRONOUS:
437 			if (ecomp != NULL) {
438 				uint8_t mult;
439 
440 				mult = UE_GET_SS_ISO_MULT(
441 				    ecomp->bmAttributes) + 1;
442 				if (mult > 3)
443 					mult = 3;
444 
445 				xfer->max_packet_count *= mult;
446 			}
447 			break;
448 		default:
449 			break;
450 		}
451 		xfer->max_packet_size &= 0x7FF;
452 		break;
453 	default:
454 		break;
455 	}
456 	/* range check "max_packet_count" */
457 
458 	if (xfer->max_packet_count > parm->hc_max_packet_count) {
459 		xfer->max_packet_count = parm->hc_max_packet_count;
460 	}
461 
462 	/* store max packet size value before filtering */
463 
464 	maxp_old = xfer->max_packet_size;
465 
466 	/* filter "wMaxPacketSize" according to HC capabilities */
467 
468 	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
469 	    (xfer->max_packet_size == 0)) {
470 		xfer->max_packet_size = parm->hc_max_packet_size;
471 	}
472 	/* filter "wMaxPacketSize" according to standard sizes */
473 
474 	usbd_get_std_packet_size(&std_size, type, parm->speed);
475 
476 	if (std_size.range.min || std_size.range.max) {
477 
478 		if (xfer->max_packet_size < std_size.range.min) {
479 			xfer->max_packet_size = std_size.range.min;
480 		}
481 		if (xfer->max_packet_size > std_size.range.max) {
482 			xfer->max_packet_size = std_size.range.max;
483 		}
484 	} else {
485 
486 		if (xfer->max_packet_size >= std_size.fixed[3]) {
487 			xfer->max_packet_size = std_size.fixed[3];
488 		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
489 			xfer->max_packet_size = std_size.fixed[2];
490 		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
491 			xfer->max_packet_size = std_size.fixed[1];
492 		} else {
493 			/* only one possibility left */
494 			xfer->max_packet_size = std_size.fixed[0];
495 		}
496 	}
497 
498 	/*
499 	 * Check if the max packet size was outside its allowed range
500 	 * and clamped to a valid value:
501 	 */
502 	if (maxp_old != xfer->max_packet_size)
503 		xfer->flags_int.maxp_was_clamped = 1;
504 
505 	/* compute "max_frame_size" */
506 
507 	usbd_update_max_frame_size(xfer);
508 
509 	/* check interrupt interval and transfer pre-delay */
510 
511 	if (type == UE_ISOCHRONOUS) {
512 
513 		uint16_t frame_limit;
514 
515 		xfer->interval = 0;	/* not used, must be zero */
516 		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
517 
518 		if (xfer->timeout == 0) {
519 			/*
520 			 * set a default timeout in
521 			 * case something goes wrong!
522 			 */
523 			xfer->timeout = 1000 / 4;
524 		}
525 		switch (parm->speed) {
526 		case USB_SPEED_LOW:
527 		case USB_SPEED_FULL:
528 			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
529 			xfer->fps_shift = 0;
530 			break;
531 		default:
532 			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
533 			xfer->fps_shift = edesc->bInterval;
534 			if (xfer->fps_shift > 0)
535 				xfer->fps_shift--;
536 			if (xfer->fps_shift > 3)
537 				xfer->fps_shift = 3;
538 			if (xfer->flags.pre_scale_frames != 0)
539 				xfer->nframes <<= (3 - xfer->fps_shift);
540 			break;
541 		}
542 
543 		if (xfer->nframes > frame_limit) {
544 			/*
545 			 * this is not going to work
546 			 * across all hardware
547 			 */
548 			parm->err = USB_ERR_INVAL;
549 			goto done;
550 		}
551 		if (xfer->nframes == 0) {
552 			/*
553 			 * this is not a valid value
554 			 */
555 			parm->err = USB_ERR_ZERO_NFRAMES;
556 			goto done;
557 		}
558 	} else {
559 
560 		/*
561 		 * If a value is specified, use that; else check the
562 		 * endpoint descriptor!
563 		 */
564 		if (type == UE_INTERRUPT) {
565 
566 			uint32_t temp;
567 
568 			if (xfer->interval == 0) {
569 
570 				xfer->interval = edesc->bInterval;
571 
572 				switch (parm->speed) {
573 				case USB_SPEED_LOW:
574 				case USB_SPEED_FULL:
575 					break;
576 				default:
577 					/* 125us -> 1ms */
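					/*
					 * bInterval is an exponent here:
					 * the period is 2**(bInterval-1)
					 * microframes of 125us each.
					 * Convert it to 2**(bInterval-4)
					 * milliseconds and clamp the
					 * result to the 1..4096ms range.
					 */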
578 					if (xfer->interval < 4)
579 						xfer->interval = 1;
580 					else if (xfer->interval > 16)
581 						xfer->interval = (1 << (16 - 4));
582 					else
583 						xfer->interval =
584 						    (1 << (xfer->interval - 4));
585 					break;
586 				}
587 			}
588 
589 			if (xfer->interval == 0) {
590 				/*
591 				 * One millisecond is the smallest
592 				 * interval we support:
593 				 */
594 				xfer->interval = 1;
595 			}
596 
597 			xfer->fps_shift = 0;
598 			temp = 1;
599 
600 			while ((temp != 0) && (temp < xfer->interval)) {
601 				xfer->fps_shift++;
602 				temp *= 2;
603 			}
604 
605 			switch (parm->speed) {
606 			case USB_SPEED_LOW:
607 			case USB_SPEED_FULL:
608 				break;
609 			default:
610 				xfer->fps_shift += 3;
611 				break;
612 			}
613 		}
614 	}
615 
616 	/*
617 	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
618 	 * to be equal to zero when setting up USB transfers, because
619 	 * that would lead to a lot of extra code in the USB kernel.
620 	 */
621 
622 	if ((xfer->max_frame_size == 0) ||
623 	    (xfer->max_packet_size == 0)) {
624 
625 		zmps = 1;
626 
627 		if ((parm->bufsize <= MIN_PKT) &&
628 		    (type != UE_CONTROL) &&
629 		    (type != UE_BULK)) {
630 
631 			/* workaround */
632 			xfer->max_packet_size = MIN_PKT;
633 			xfer->max_packet_count = 1;
634 			parm->bufsize = 0;	/* automatic setup length */
635 			usbd_update_max_frame_size(xfer);
636 
637 		} else {
638 			parm->err = USB_ERR_ZERO_MAXP;
639 			goto done;
640 		}
641 
642 	} else {
643 		zmps = 0;
644 	}
645 
646 	/*
647 	 * check if we should setup a default
648 	 * length:
649 	 */
650 
651 	if (parm->bufsize == 0) {
652 
653 		parm->bufsize = xfer->max_frame_size;
654 
655 		if (type == UE_ISOCHRONOUS) {
656 			parm->bufsize *= xfer->nframes;
657 		}
658 	}
659 	/*
660 	 * check if we are about to setup a proxy
661 	 * type of buffer:
662 	 */
663 
664 	if (xfer->flags.proxy_buffer) {
665 
666 		/* round bufsize up */
667 
668 		parm->bufsize += (xfer->max_frame_size - 1);
669 
670 		if (parm->bufsize < xfer->max_frame_size) {
671 			/* length wrapped around */
672 			parm->err = USB_ERR_INVAL;
673 			goto done;
674 		}
675 		/* subtract remainder */
676 
677 		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
678 
679 		/* add length of USB device request structure, if any */
680 
681 		if (type == UE_CONTROL) {
682 			parm->bufsize += REQ_SIZE;	/* SETUP message */
683 		}
684 	}
685 	xfer->max_data_length = parm->bufsize;
686 
687 	/* Setup "n_frlengths" and "n_frbuffers" */
688 
689 	if (type == UE_ISOCHRONOUS) {
690 		n_frlengths = xfer->nframes;
691 		n_frbuffers = 1;
692 	} else {
693 
694 		if (type == UE_CONTROL) {
695 			xfer->flags_int.control_xfr = 1;
696 			if (xfer->nframes == 0) {
697 				if (parm->bufsize <= REQ_SIZE) {
698 					/*
699 					 * there will never be any data
700 					 * stage
701 					 */
702 					xfer->nframes = 1;
703 				} else {
704 					xfer->nframes = 2;
705 				}
706 			}
707 		} else {
708 			if (xfer->nframes == 0) {
709 				xfer->nframes = 1;
710 			}
711 		}
712 
713 		n_frlengths = xfer->nframes;
714 		n_frbuffers = xfer->nframes;
715 	}
716 
717 	/*
718 	 * check if we have room for the
719 	 * USB device request structure:
720 	 */
721 
722 	if (type == UE_CONTROL) {
723 
724 		if (xfer->max_data_length < REQ_SIZE) {
725 			/* length wrapped around or too small bufsize */
726 			parm->err = USB_ERR_INVAL;
727 			goto done;
728 		}
729 		xfer->max_data_length -= REQ_SIZE;
730 	}
731 	/*
732 	 * Setup "frlengths" and shadow "frlengths" for keeping the
733 	 * initial frame lengths when a USB transfer is complete. This
734 	 * information is useful when computing isochronous offsets.
735 	 */
736 	xfer->frlengths = parm->xfer_length_ptr;
737 	parm->xfer_length_ptr += 2 * n_frlengths;
738 
739 	/* setup "frbuffers" */
740 	xfer->frbuffers = parm->xfer_page_cache_ptr;
741 	parm->xfer_page_cache_ptr += n_frbuffers;
742 
743 	/* initialize max frame count */
744 	xfer->max_frame_count = xfer->nframes;
745 
746 	/*
747 	 * check if we need to setup
748 	 * a local buffer:
749 	 */
750 
751 	if (!xfer->flags.ext_buffer) {
752 #if USB_HAVE_BUSDMA
753 		struct usb_page_search page_info;
754 		struct usb_page_cache *pc;
755 
756 		if (usbd_transfer_setup_sub_malloc(parm,
757 		    &pc, parm->bufsize, 1, 1)) {
758 			parm->err = USB_ERR_NOMEM;
759 		} else if (parm->buf != NULL) {
760 
761 			usbd_get_page(pc, 0, &page_info);
762 
763 			xfer->local_buffer = page_info.buffer;
764 
765 			usbd_xfer_set_frame_offset(xfer, 0, 0);
766 
767 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
768 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
769 			}
770 		}
771 #else
772 		/* align data */
773 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
774 
775 		if (parm->buf != NULL) {
776 			xfer->local_buffer =
777 			    USB_ADD_BYTES(parm->buf, parm->size[0]);
778 
779 			usbd_xfer_set_frame_offset(xfer, 0, 0);
780 
781 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
782 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
783 			}
784 		}
785 		parm->size[0] += parm->bufsize;
786 
787 		/* align data again */
788 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
789 #endif
790 	}
791 	/*
792 	 * Compute maximum buffer size
793 	 */
794 
795 	if (parm->bufsize_max < parm->bufsize) {
796 		parm->bufsize_max = parm->bufsize;
797 	}
798 #if USB_HAVE_BUSDMA
799 	if (xfer->flags_int.bdma_enable) {
800 		/*
801 		 * Setup "dma_page_ptr".
802 		 *
803 		 * Proof for formula below:
804 		 *
805 		 * Assume there are three USB frames having length "a", "b" and
806 		 * "c". These USB frames will at maximum need "z"
807 		 * "usb_page" structures. "z" is given by:
808 		 *
809 		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
810 		 * ((c / USB_PAGE_SIZE) + 2);
811 		 *
812 		 * Constraining "a", "b" and "c" like this:
813 		 *
814 		 * (a + b + c) <= parm->bufsize
815 		 *
816 		 * We know that:
817 		 *
818 		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
819 		 *
820 		 * Here is the general formula:
821 		 */
822 		xfer->dma_page_ptr = parm->dma_page_ptr;
823 		parm->dma_page_ptr += (2 * n_frbuffers);
824 		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
825 	}
826 #endif
827 	if (zmps) {
828 		/* correct maximum data length */
829 		xfer->max_data_length = 0;
830 	}
831 	/* subtract USB frame remainder from "hc_max_frame_size" */
832 
833 	xfer->max_hc_frame_size =
834 	    (parm->hc_max_frame_size -
835 	    (parm->hc_max_frame_size % xfer->max_frame_size));
836 
837 	if (xfer->max_hc_frame_size == 0) {
838 		parm->err = USB_ERR_INVAL;
839 		goto done;
840 	}
841 
842 	/* initialize frame buffers */
843 
844 	if (parm->buf) {
845 		for (x = 0; x != n_frbuffers; x++) {
846 			xfer->frbuffers[x].tag_parent =
847 			    &xfer->xroot->dma_parent_tag;
848 #if USB_HAVE_BUSDMA
849 			if (xfer->flags_int.bdma_enable &&
850 			    (parm->bufsize_max > 0)) {
851 
852 				if (usb_pc_dmamap_create(
853 				    xfer->frbuffers + x,
854 				    parm->bufsize_max)) {
855 					parm->err = USB_ERR_NOMEM;
856 					goto done;
857 				}
858 			}
859 #endif
860 		}
861 	}
862 done:
863 	if (parm->err) {
864 		/*
865 		 * Set some dummy values so that we avoid division by zero:
866 		 */
867 		xfer->max_hc_frame_size = 1;
868 		xfer->max_frame_size = 1;
869 		xfer->max_packet_size = 1;
870 		xfer->max_data_length = 0;
871 		xfer->nframes = 0;
872 		xfer->max_frame_count = 0;
873 	}
874 }
875 
876 static uint8_t
877 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
878     uint16_t n_setup)
879 {
880 	while (n_setup--) {
881 		uint8_t type = setup_start[n_setup].type;
882 		if (type == UE_BULK || type == UE_BULK_INTR ||
883 		    type == UE_TYPE_ANY)
884 			return (1);
885 	}
886 	return (0);
887 }
888 
889 /*------------------------------------------------------------------------*
890  *	usbd_transfer_setup - setup an array of USB transfers
891  *
892  * NOTE: You must always call "usbd_transfer_unsetup" after calling
893  * "usbd_transfer_setup" if success was returned.
894  *
895  * The idea is that the USB device driver should pre-allocate all its
896  * transfers by one call to this function.
897  *
898  * Return values:
899  *    0: Success
900  * Else: Failure
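 *
 * Typical usage from a device driver's attach routine (a sketch;
 * "sc", "sc_xfer", "sc_mtx", "my_config" and MY_N_TRANSFERS are
 * hypothetical driver names):
 *
 *	error = usbd_transfer_setup(uaa->device, &iface_index,
 *	    sc->sc_xfer, my_config, MY_N_TRANSFERS, sc, &sc->sc_mtx);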
901  *------------------------------------------------------------------------*/
902 usb_error_t
903 usbd_transfer_setup(struct usb_device *udev,
904     const uint8_t *ifaces, struct usb_xfer **ppxfer,
905     const struct usb_config *setup_start, uint16_t n_setup,
906     void *priv_sc, struct mtx *xfer_mtx)
907 {
908 	const struct usb_config *setup_end = setup_start + n_setup;
909 	const struct usb_config *setup;
910 	struct usb_setup_params *parm;
911 	struct usb_endpoint *ep;
912 	struct usb_xfer_root *info;
913 	struct usb_xfer *xfer;
914 	void *buf = NULL;
915 	usb_error_t error = 0;
916 	uint16_t n;
917 	uint16_t refcount;
918 	uint8_t do_unlock;
919 
920 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
921 	    "usbd_transfer_setup can sleep!");
922 
923 	/* do some checking first */
924 
925 	if (n_setup == 0) {
926 		DPRINTFN(6, "setup array has zero length!\n");
927 		return (USB_ERR_INVAL);
928 	}
929 	if (ifaces == NULL) {
930 		DPRINTFN(6, "ifaces array is NULL!\n");
931 		return (USB_ERR_INVAL);
932 	}
933 	if (xfer_mtx == NULL) {
934 		DPRINTFN(6, "using global lock\n");
935 		xfer_mtx = &Giant;
936 	}
937 
938 	/* more sanity checks */
939 
940 	for (setup = setup_start, n = 0;
941 	    setup != setup_end; setup++, n++) {
942 		if (setup->bufsize == (usb_frlength_t)-1) {
943 			error = USB_ERR_BAD_BUFSIZE;
944 			DPRINTF("invalid bufsize\n");
945 		}
946 		if (setup->callback == NULL) {
947 			error = USB_ERR_NO_CALLBACK;
948 			DPRINTF("no callback\n");
949 		}
950 		ppxfer[n] = NULL;
951 	}
952 
953 	if (error)
954 		return (error);
955 
956 	/* Protect scratch area */
957 	do_unlock = usbd_ctrl_lock(udev);
958 
959 	refcount = 0;
960 	info = NULL;
961 
962 	parm = &udev->scratch.xfer_setup[0].parm;
963 	memset(parm, 0, sizeof(*parm));
964 
965 	parm->udev = udev;
966 	parm->speed = usbd_get_speed(udev);
967 	parm->hc_max_packet_count = 1;
968 
969 	if (parm->speed >= USB_SPEED_MAX) {
970 		parm->err = USB_ERR_INVAL;
971 		goto done;
972 	}
973 	/* setup all transfers */
974 
975 	while (1) {
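		/*
		 * This loop runs at most twice. During the first pass
		 * "buf" is NULL and only the amount of memory needed is
		 * computed. After the memory has been allocated at the
		 * bottom of the loop, a second pass performs the actual
		 * setup of all data structures.
		 */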
976 
977 		if (buf) {
978 			/*
979 			 * Initialize the "usb_xfer_root" structure,
980 			 * which is common for all our USB transfers.
981 			 */
982 			info = USB_ADD_BYTES(buf, 0);
983 
984 			info->memory_base = buf;
985 			info->memory_size = parm->size[0];
986 
987 #if USB_HAVE_BUSDMA
988 			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
989 			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
990 #endif
991 			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
992 			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
993 
994 			cv_init(&info->cv_drain, "WDRAIN");
995 
996 			info->xfer_mtx = xfer_mtx;
997 #if USB_HAVE_BUSDMA
998 			usb_dma_tag_setup(&info->dma_parent_tag,
999 			    parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
1000 			    xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
1001 			    parm->dma_tag_max);
1002 #endif
1003 
1004 			info->bus = udev->bus;
1005 			info->udev = udev;
1006 
1007 			TAILQ_INIT(&info->done_q.head);
1008 			info->done_q.command = &usbd_callback_wrapper;
1009 #if USB_HAVE_BUSDMA
1010 			TAILQ_INIT(&info->dma_q.head);
1011 			info->dma_q.command = &usb_bdma_work_loop;
1012 #endif
1013 			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1014 			info->done_m[0].xroot = info;
1015 			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1016 			info->done_m[1].xroot = info;
1017 
1018 			/*
1019 			 * In device side mode control endpoint
1020 			 * requests need to run from a separate
1021 			 * context, else there is a chance of
1022 			 * deadlock!
1023 			 */
1024 			if (setup_start == usb_control_ep_cfg)
1025 				info->done_p =
1026 				    USB_BUS_CONTROL_XFER_PROC(udev->bus);
1027 			else if (xfer_mtx == &Giant)
1028 				info->done_p =
1029 				    USB_BUS_GIANT_PROC(udev->bus);
1030 			else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1031 				info->done_p =
1032 				    USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1033 			else
1034 				info->done_p =
1035 				    USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1036 		}
1037 		/* reset sizes */
1038 
1039 		parm->size[0] = 0;
1040 		parm->buf = buf;
1041 		parm->size[0] += sizeof(info[0]);
1042 
1043 		for (setup = setup_start, n = 0;
1044 		    setup != setup_end; setup++, n++) {
1045 
1046 			/* skip USB transfers without callbacks: */
1047 			if (setup->callback == NULL) {
1048 				continue;
1049 			}
1050 			/* see if there is a matching endpoint */
1051 			ep = usbd_get_endpoint(udev,
1052 			    ifaces[setup->if_index], setup);
1053 
1054 			/*
1055 			 * Check that the USB PIPE is valid and that
1056 			 * the endpoint mode is proper.
1057 			 *
1058 			 * Make sure we don't allocate a streams
1059 			 * transfer when such a combination is not
1060 			 * valid.
1061 			 */
1062 			if ((ep == NULL) || (ep->methods == NULL) ||
1063 			    ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1064 			    (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1065 			    (setup->stream_id != 0 &&
1066 			    (setup->stream_id >= USB_MAX_EP_STREAMS ||
1067 			    (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1068 				if (setup->flags.no_pipe_ok)
1069 					continue;
1070 				if ((setup->usb_mode != USB_MODE_DUAL) &&
1071 				    (setup->usb_mode != udev->flags.usb_mode))
1072 					continue;
1073 				parm->err = USB_ERR_NO_PIPE;
1074 				goto done;
1075 			}
1076 
1077 			/* align data properly */
1078 			parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1079 
1080 			/* store current setup pointer */
1081 			parm->curr_setup = setup;
1082 
1083 			if (buf) {
1084 				/*
1085 				 * Common initialization of the
1086 				 * "usb_xfer" structure.
1087 				 */
1088 				xfer = USB_ADD_BYTES(buf, parm->size[0]);
1089 				xfer->address = udev->address;
1090 				xfer->priv_sc = priv_sc;
1091 				xfer->xroot = info;
1092 
1093 				usb_callout_init_mtx(&xfer->timeout_handle,
1094 				    &udev->bus->bus_mtx, 0);
1095 			} else {
1096 				/*
1097 				 * Setup a dummy xfer, hence we are
1098 				 * writing to the "usb_xfer"
1099 				 * structure pointed to by "xfer"
1100 				 * before we have allocated any
1101 				 * memory:
1102 				 */
1103 				xfer = &udev->scratch.xfer_setup[0].dummy;
1104 				memset(xfer, 0, sizeof(*xfer));
1105 				refcount++;
1106 			}
1107 
1108 			/* set transfer endpoint pointer */
1109 			xfer->endpoint = ep;
1110 
1111 			/* set transfer stream ID */
1112 			xfer->stream_id = setup->stream_id;
1113 
1114 			parm->size[0] += sizeof(xfer[0]);
1115 			parm->methods = xfer->endpoint->methods;
1116 			parm->curr_xfer = xfer;
1117 
1118 			/*
1119 			 * Call the Host or Device controller transfer
1120 			 * setup routine:
1121 			 */
1122 			(udev->bus->methods->xfer_setup) (parm);
1123 
1124 			/* check for error */
1125 			if (parm->err)
1126 				goto done;
1127 
1128 			if (buf) {
1129 				/*
1130 				 * Increment the endpoint refcount. This
1131 				 * basically prevents setting a new
1132 				 * configuration and alternate setting
1133 				 * when USB transfers are in use on
1134 				 * the given interface. Search the USB
1135 				 * code for "endpoint->refcount_alloc" if you
1136 				 * want more information.
1137 				 */
1138 				USB_BUS_LOCK(info->bus);
1139 				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1140 					parm->err = USB_ERR_INVAL;
1141 
1142 				xfer->endpoint->refcount_alloc++;
1143 
1144 				if (xfer->endpoint->refcount_alloc == 0)
1145 					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1146 				USB_BUS_UNLOCK(info->bus);
1147 
1148 				/*
1149 				 * Whenever we set ppxfer[] then we
1150 				 * also need to increment the
1151 				 * "setup_refcount":
1152 				 */
1153 				info->setup_refcount++;
1154 
1155 				/*
1156 				 * Transfer is successfully setup and
1157 				 * can be used:
1158 				 */
1159 				ppxfer[n] = xfer;
1160 			}
1161 
1162 			/* check for error */
1163 			if (parm->err)
1164 				goto done;
1165 		}
1166 
1167 		if (buf != NULL || parm->err != 0)
1168 			goto done;
1169 
1170 		/* if no transfers, nothing to do */
1171 		if (refcount == 0)
1172 			goto done;
1173 
1174 		/* align data properly */
1175 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1176 
1177 		/* store offset temporarily */
1178 		parm->size[1] = parm->size[0];
1179 
1180 		/*
1181 		 * The number of DMA tags required depends on
1182 		 * the number of endpoints. The current estimate
1183 		 * for maximum number of DMA tags per endpoint
1184 		 * is three:
1185 		 * 1) for loading memory
1186 		 * 2) for allocating memory
1187 		 * 3) for fixing memory [UHCI]
1188 		 */
1189 		parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1190 
1191 		/*
1192 		 * DMA tags for QH, TD, Data and more.
1193 		 */
1194 		parm->dma_tag_max += 8;
1195 
1196 		parm->dma_tag_p += parm->dma_tag_max;
1197 
1198 		parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1199 		    ((uint8_t *)0);
1200 
1201 		/* align data properly */
1202 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1203 
1204 		/* store offset temporarily */
1205 		parm->size[3] = parm->size[0];
1206 
1207 		parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1208 		    ((uint8_t *)0);
1209 
1210 		/* align data properly */
1211 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1212 
1213 		/* store offset temporarily */
1214 		parm->size[4] = parm->size[0];
1215 
1216 		parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1217 		    ((uint8_t *)0);
1218 
1219 		/* store end offset temporarily */
1220 		parm->size[5] = parm->size[0];
1221 
1222 		parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1223 		    ((uint8_t *)0);
1224 
1225 		/* store end offset temporarily */
1226 
1227 		parm->size[2] = parm->size[0];
1228 
1229 		/* align data properly */
1230 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1231 
1232 		parm->size[6] = parm->size[0];
1233 
1234 		parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1235 		    ((uint8_t *)0);
1236 
1237 		/* align data properly */
1238 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1239 
1240 		/* allocate zeroed memory */
1241 		buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1242 
1243 		if (buf == NULL) {
1244 			parm->err = USB_ERR_NOMEM;
1245 			DPRINTFN(0, "cannot allocate memory block for "
1246 			    "configuration (%d bytes)\n",
1247 			    parm->size[0]);
1248 			goto done;
1249 		}
1250 		parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1251 		parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1252 		parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1253 		parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1254 		parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1255 	}
1256 
1257 done:
1258 	if (buf) {
1259 		if (info->setup_refcount == 0) {
1260 			/*
1261 			 * "usbd_transfer_unsetup_sub" will unlock
1262 			 * the bus mutex before returning !
1263 			 */
1264 			USB_BUS_LOCK(info->bus);
1265 
1266 			/* something went wrong */
1267 			usbd_transfer_unsetup_sub(info, 0);
1268 		}
1269 	}
1270 
1271 	/* check if any errors happened */
1272 	if (parm->err)
1273 		usbd_transfer_unsetup(ppxfer, n_setup);
1274 
1275 	error = parm->err;
1276 
1277 	if (do_unlock)
1278 		usbd_ctrl_unlock(udev);
1279 
1280 	return (error);
1281 }
1282 
1283 /*------------------------------------------------------------------------*
1284  *	usbd_transfer_unsetup_sub - factored out code
1285  *------------------------------------------------------------------------*/
1286 static void
1287 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1288 {
1289 #if USB_HAVE_BUSDMA
1290 	struct usb_page_cache *pc;
1291 #endif
1292 
1293 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1294 
1295 	/* wait for any outstanding DMA operations */
1296 
1297 	if (needs_delay) {
1298 		usb_timeout_t temp;
1299 		temp = usbd_get_dma_delay(info->udev);
1300 		if (temp != 0) {
1301 			usb_pause_mtx(&info->bus->bus_mtx,
1302 			    USB_MS_TO_TICKS(temp));
1303 		}
1304 	}
1305 
1306 	/* make sure that our done messages are not queued anywhere */
1307 	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1308 
1309 	USB_BUS_UNLOCK(info->bus);
1310 
1311 #if USB_HAVE_BUSDMA
1312 	/* free DMA'able memory, if any */
1313 	pc = info->dma_page_cache_start;
1314 	while (pc != info->dma_page_cache_end) {
1315 		usb_pc_free_mem(pc);
1316 		pc++;
1317 	}
1318 
1319 	/* free DMA maps in all "xfer->frbuffers" */
1320 	pc = info->xfer_page_cache_start;
1321 	while (pc != info->xfer_page_cache_end) {
1322 		usb_pc_dmamap_destroy(pc);
1323 		pc++;
1324 	}
1325 
1326 	/* free all DMA tags */
1327 	usb_dma_tag_unsetup(&info->dma_parent_tag);
1328 #endif
1329 
1330 	cv_destroy(&info->cv_drain);
1331 
1332 	/*
1333 	 * free the "memory_base" last, hence the "info" structure is
1334 	 * contained within the "memory_base"!
1335 	 */
1336 	free(info->memory_base, M_USB);
1337 }
1338 
1339 /*------------------------------------------------------------------------*
1340  *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1341  *
1342  * NOTE: All USB transfers in progress will get called back passing
1343  * the error code "USB_ERR_CANCELLED" before this function
1344  * returns.
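 *
 * Typical usage from a device driver's detach routine (a sketch;
 * "sc" and MY_N_TRANSFERS are hypothetical driver names):
 *
 *	usbd_transfer_unsetup(sc->sc_xfer, MY_N_TRANSFERS);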
1345  *------------------------------------------------------------------------*/
1346 void
1347 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1348 {
1349 	struct usb_xfer *xfer;
1350 	struct usb_xfer_root *info;
1351 	uint8_t needs_delay = 0;
1352 
1353 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1354 	    "usbd_transfer_unsetup can sleep!");
1355 
1356 	while (n_setup--) {
1357 		xfer = pxfer[n_setup];
1358 
1359 		if (xfer == NULL)
1360 			continue;
1361 
1362 		info = xfer->xroot;
1363 
1364 		USB_XFER_LOCK(xfer);
1365 		USB_BUS_LOCK(info->bus);
1366 
1367 		/*
1368 		 * HINT: when you start/stop a transfer, it might be a
1369 		 * good idea to directly use the "pxfer[]" structure:
1370 		 *
1371 		 * usbd_transfer_start(sc->pxfer[0]);
1372 		 * usbd_transfer_stop(sc->pxfer[0]);
1373 		 *
1374 		 * That way, if your code has many parts that will not
1375 		 * stop running under the same lock, in other words
1376 		 * "xfer_mtx", the usbd_transfer_start and
1377 		 * usbd_transfer_stop functions will simply return
1378 		 * when they detect a NULL pointer argument.
1379 		 *
1380 		 * To avoid any races we clear the "pxfer[]" pointer
1381 		 * while holding the private mutex of the driver:
1382 		 */
1383 		pxfer[n_setup] = NULL;
1384 
1385 		USB_BUS_UNLOCK(info->bus);
1386 		USB_XFER_UNLOCK(xfer);
1387 
1388 		usbd_transfer_drain(xfer);
1389 
1390 #if USB_HAVE_BUSDMA
1391 		if (xfer->flags_int.bdma_enable)
1392 			needs_delay = 1;
1393 #endif
1394 		/*
1395 		 * NOTE: default endpoint does not have an
1396 		 * interface, even if endpoint->iface_index == 0
1397 		 */
1398 		USB_BUS_LOCK(info->bus);
1399 		xfer->endpoint->refcount_alloc--;
1400 		USB_BUS_UNLOCK(info->bus);
1401 
1402 		usb_callout_drain(&xfer->timeout_handle);
1403 
1404 		USB_BUS_LOCK(info->bus);
1405 
1406 		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1407 		    "reference count\n"));
1408 
1409 		info->setup_refcount--;
1410 
1411 		if (info->setup_refcount == 0) {
1412 			usbd_transfer_unsetup_sub(info,
1413 			    needs_delay);
1414 		} else {
1415 			USB_BUS_UNLOCK(info->bus);
1416 		}
1417 	}
1418 }
1419 
1420 /*------------------------------------------------------------------------*
1421  *	usbd_control_transfer_init - factored out code
1422  *
1423  * In USB Device Mode we have to wait for the SETUP packet, which
1424  * contains the "struct usb_device_request" structure, before we can
1425  * transfer any data. In USB Host Mode we already have the SETUP
1426  * packet at the moment the USB transfer is started. This leads us to
1427  * having to set up the USB transfer at two different points in
1428  * time. This function just contains factored out control transfer
1429  * initialisation code, so that we don't duplicate the code.
1430  *------------------------------------------------------------------------*/
1431 static void
1432 usbd_control_transfer_init(struct usb_xfer *xfer)
1433 {
1434 	struct usb_device_request req;
1435 
1436 	/* copy out the USB request header */
1437 
1438 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1439 
1440 	/* setup remainder */
1441 
1442 	xfer->flags_int.control_rem = UGETW(req.wLength);
1443 
1444 	/* copy direction to endpoint variable */
1445 
1446 	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1447 	xfer->endpointno |=
1448 	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1449 }
1450 
1451 /*------------------------------------------------------------------------*
1452  *	usbd_control_transfer_did_data
1453  *
1454  * This function returns non-zero if a control endpoint has
1455  * transferred the first DATA packet after the SETUP packet.
1456  * Else it returns zero.
1457  *------------------------------------------------------------------------*/
1458 static uint8_t
1459 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1460 {
1461 	struct usb_device_request req;
1462 
1463 	/* SETUP packet is not yet sent */
1464 	if (xfer->flags_int.control_hdr != 0)
1465 		return (0);
1466 
1467 	/* copy out the USB request header */
1468 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1469 
1470 	/* compare remainder to the initial value */
1471 	return (xfer->flags_int.control_rem != UGETW(req.wLength));
1472 }
1473 
1474 /*------------------------------------------------------------------------*
1475  *	usbd_setup_ctrl_transfer
1476  *
1477  * This function handles initialisation of control transfers. Control
1478  * transfers are special in that they can both transmit
1479  * and receive data.
1480  *
1481  * Return values:
1482  *    0: Success
1483  * Else: Failure
1484  *------------------------------------------------------------------------*/
1485 static int
1486 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1487 {
1488 	usb_frlength_t len;
1489 
1490 	/* Check for control endpoint stall */
1491 	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1492 		/* the control transfer is no longer active */
1493 		xfer->flags_int.control_stall = 1;
1494 		xfer->flags_int.control_act = 0;
1495 	} else {
1496 		/* don't stall control transfer by default */
1497 		xfer->flags_int.control_stall = 0;
1498 	}
1499 
1500 	/* Check for invalid number of frames */
1501 	if (xfer->nframes > 2) {
1502 		/*
1503 		 * If you need to split a control transfer, you
1504 		 * have to do one part at a time. Only with
1505 		 * non-control transfers can you do multiple
1506 		 * parts at a time.
1507 		 */
1508 		DPRINTFN(0, "Too many frames: %u\n",
1509 		    (unsigned int)xfer->nframes);
1510 		goto error;
1511 	}
1512 
1513 	/*
1514          * Check if there is a control
1515          * transfer in progress:
1516          */
1517 	if (xfer->flags_int.control_act) {
1518 
1519 		if (xfer->flags_int.control_hdr) {
1520 
1521 			/* clear send header flag */
1522 
1523 			xfer->flags_int.control_hdr = 0;
1524 
1525 			/* setup control transfer */
1526 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1527 				usbd_control_transfer_init(xfer);
1528 			}
1529 		}
1530 		/* get data length */
1531 
1532 		len = xfer->sumlen;
1533 
1534 	} else {
1535 
1536 		/* the size of the SETUP structure is hardcoded ! */
1537 
1538 		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1539 			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1540 			    xfer->frlengths[0], sizeof(struct
1541 			    usb_device_request));
1542 			goto error;
1543 		}
1544 		/* check USB mode */
1545 		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1546 
1547 			/* check number of frames */
1548 			if (xfer->nframes != 1) {
1549 				/*
1550 			         * We need to receive the setup
1551 			         * message first so that we know the
1552 			         * data direction!
1553 			         */
1554 				DPRINTF("Misconfigured transfer\n");
1555 				goto error;
1556 			}
1557 			/*
1558 			 * Set a dummy "control_rem" value.  This
1559 			 * variable will be overwritten later by a
1560 			 * call to "usbd_control_transfer_init()" !
1561 			 */
1562 			xfer->flags_int.control_rem = 0xFFFF;
1563 		} else {
1564 
1565 			/* setup "endpoint" and "control_rem" */
1566 
1567 			usbd_control_transfer_init(xfer);
1568 		}
1569 
1570 		/* set transfer-header flag */
1571 
1572 		xfer->flags_int.control_hdr = 1;
1573 
1574 		/* get data length */
1575 
1576 		len = (xfer->sumlen - sizeof(struct usb_device_request));
1577 	}
1578 
1579 	/* update did data flag */
1580 
1581 	xfer->flags_int.control_did_data =
1582 	    usbd_control_transfer_did_data(xfer);
1583 
1584 	/* check if there is a length mismatch */
1585 
1586 	if (len > xfer->flags_int.control_rem) {
1587 		DPRINTFN(0, "Length (%d) greater than "
1588 		    "remaining length (%d)\n", len,
1589 		    xfer->flags_int.control_rem);
1590 		goto error;
1591 	}
1592 	/* check if we are doing a short transfer */
1593 
1594 	if (xfer->flags.force_short_xfer) {
1595 		xfer->flags_int.control_rem = 0;
1596 	} else {
1597 		if ((len != xfer->max_data_length) &&
1598 		    (len != xfer->flags_int.control_rem) &&
1599 		    (xfer->nframes != 1)) {
1600 			DPRINTFN(0, "Short control transfer without "
1601 			    "force_short_xfer set\n");
1602 			goto error;
1603 		}
1604 		xfer->flags_int.control_rem -= len;
1605 	}
1606 
1607 	/* the status part is executed when "control_act" is 0 */
1608 
1609 	if ((xfer->flags_int.control_rem > 0) ||
1610 	    (xfer->flags.manual_status)) {
1611 		/* don't execute the STATUS stage yet */
1612 		xfer->flags_int.control_act = 1;
1613 
1614 		/* sanity check */
1615 		if ((!xfer->flags_int.control_hdr) &&
1616 		    (xfer->nframes == 1)) {
1617 			/*
1618 		         * This is not a valid operation!
1619 		         */
1620 			DPRINTFN(0, "Invalid parameter "
1621 			    "combination\n");
1622 			goto error;
1623 		}
1624 	} else {
1625 		/* time to execute the STATUS stage */
1626 		xfer->flags_int.control_act = 0;
1627 	}
1628 	return (0);			/* success */
1629 
1630 error:
1631 	return (1);			/* failure */
1632 }
1633 
1634 /*------------------------------------------------------------------------*
1635  *	usbd_transfer_submit - start USB hardware for the given transfer
1636  *
1637  * This function should only be called from the USB callback.
1638  *------------------------------------------------------------------------*/
1639 void
1640 usbd_transfer_submit(struct usb_xfer *xfer)
1641 {
1642 	struct usb_xfer_root *info;
1643 	struct usb_bus *bus;
1644 	usb_frcount_t x;
1645 
1646 	info = xfer->xroot;
1647 	bus = info->bus;
1648 
1649 	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1650 	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1651 	    "read" : "write");
1652 
1653 #ifdef USB_DEBUG
1654 	if (USB_DEBUG_VAR > 0) {
1655 		USB_BUS_LOCK(bus);
1656 
1657 		usb_dump_endpoint(xfer->endpoint);
1658 
1659 		USB_BUS_UNLOCK(bus);
1660 	}
1661 #endif
1662 
1663 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1664 	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1665 
1666 	/* Only open the USB transfer once! */
1667 	if (!xfer->flags_int.open) {
1668 		xfer->flags_int.open = 1;
1669 
1670 		DPRINTF("open\n");
1671 
1672 		USB_BUS_LOCK(bus);
1673 		(xfer->endpoint->methods->open) (xfer);
1674 		USB_BUS_UNLOCK(bus);
1675 	}
1676 	/* set "transferring" flag */
1677 	xfer->flags_int.transferring = 1;
1678 
1679 #if USB_HAVE_POWERD
1680 	/* increment power reference */
1681 	usbd_transfer_power_ref(xfer, 1);
1682 #endif
1683 	/*
1684 	 * Check if the transfer is waiting on a queue, most
1685 	 * frequently the "done_q":
1686 	 */
1687 	if (xfer->wait_queue) {
1688 		USB_BUS_LOCK(bus);
1689 		usbd_transfer_dequeue(xfer);
1690 		USB_BUS_UNLOCK(bus);
1691 	}
1692 	/* clear "did_dma_delay" flag */
1693 	xfer->flags_int.did_dma_delay = 0;
1694 
1695 	/* clear "did_close" flag */
1696 	xfer->flags_int.did_close = 0;
1697 
1698 #if USB_HAVE_BUSDMA
1699 	/* clear "bdma_setup" flag */
1700 	xfer->flags_int.bdma_setup = 0;
1701 #endif
1702 	/* by default we cannot cancel any USB transfer immediately */
1703 	xfer->flags_int.can_cancel_immed = 0;
1704 
1705 	/* clear lengths and frame counts by default */
1706 	xfer->sumlen = 0;
1707 	xfer->actlen = 0;
1708 	xfer->aframes = 0;
1709 
1710 	/* clear any previous errors */
1711 	xfer->error = 0;
1712 
1713 	/* Check if the device is still alive */
1714 	if (info->udev->state < USB_STATE_POWERED) {
1715 		USB_BUS_LOCK(bus);
1716 		/*
1717 		 * Must return cancelled error code else
1718 		 * device drivers can hang.
1719 		 */
1720 		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1721 		USB_BUS_UNLOCK(bus);
1722 		return;
1723 	}
1724 
1725 	/* sanity check */
1726 	if (xfer->nframes == 0) {
1727 		if (xfer->flags.stall_pipe) {
1728 			/*
1729 			 * Special case - want to stall without transferring
1730 			 * any data:
1731 			 */
1732 			DPRINTF("xfer=%p nframes=0: stall "
1733 			    "or clear stall!\n", xfer);
1734 			USB_BUS_LOCK(bus);
1735 			xfer->flags_int.can_cancel_immed = 1;
1736 			/* start the transfer */
1737 			usb_command_wrapper(&xfer->endpoint->
1738 			    endpoint_q[xfer->stream_id], xfer);
1739 			USB_BUS_UNLOCK(bus);
1740 			return;
1741 		}
1742 		USB_BUS_LOCK(bus);
1743 		usbd_transfer_done(xfer, USB_ERR_INVAL);
1744 		USB_BUS_UNLOCK(bus);
1745 		return;
1746 	}
1747 	/* compute some variables */
1748 
1749 	for (x = 0; x != xfer->nframes; x++) {
1750 		/* make a copy of the frlengths[] */
1751 		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1752 		/* compute total transfer length */
1753 		xfer->sumlen += xfer->frlengths[x];
1754 		if (xfer->sumlen < xfer->frlengths[x]) {
1755 			/* length wrapped around */
1756 			USB_BUS_LOCK(bus);
1757 			usbd_transfer_done(xfer, USB_ERR_INVAL);
1758 			USB_BUS_UNLOCK(bus);
1759 			return;
1760 		}
1761 	}
1762 
1763 	/* clear some internal flags */
1764 
1765 	xfer->flags_int.short_xfer_ok = 0;
1766 	xfer->flags_int.short_frames_ok = 0;
1767 
1768 	/* check if this is a control transfer */
1769 
1770 	if (xfer->flags_int.control_xfr) {
1771 
1772 		if (usbd_setup_ctrl_transfer(xfer)) {
1773 			USB_BUS_LOCK(bus);
1774 			usbd_transfer_done(xfer, USB_ERR_STALLED);
1775 			USB_BUS_UNLOCK(bus);
1776 			return;
1777 		}
1778 	}
1779 	/*
1780 	 * Setup filtered version of some transfer flags,
1781 	 * in case of data read direction
1782 	 */
1783 	if (USB_GET_DATA_ISREAD(xfer)) {
1784 
1785 		if (xfer->flags.short_frames_ok) {
1786 			xfer->flags_int.short_xfer_ok = 1;
1787 			xfer->flags_int.short_frames_ok = 1;
1788 		} else if (xfer->flags.short_xfer_ok) {
1789 			xfer->flags_int.short_xfer_ok = 1;
1790 
1791 			/* check for control transfer */
1792 			if (xfer->flags_int.control_xfr) {
1793 				/*
1794 				 * 1) Control transfers do not support
1795 				 * reception of multiple short USB
1796 				 * frames in host mode and device side
1797 				 * mode, with the exception of:
1798 				 *
1799 				 * 2) Due to sometimes buggy device
1800 				 * side firmware we need to do a
1801 				 * STATUS stage in case of short
1802 				 * control transfers in USB host mode.
1803 				 * The STATUS stage then becomes the
1804 				 * "alt_next" to the DATA stage.
1805 				 */
1806 				xfer->flags_int.short_frames_ok = 1;
1807 			}
1808 		}
1809 	}
1810 	/*
1811 	 * Check if BUS-DMA support is enabled and try to load virtual
1812 	 * buffers into DMA, if any:
1813 	 */
1814 #if USB_HAVE_BUSDMA
1815 	if (xfer->flags_int.bdma_enable) {
1816 		/* insert the USB transfer last in the BUS-DMA queue */
1817 		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1818 		return;
1819 	}
1820 #endif
1821 	/*
1822 	 * Enter the USB transfer into the Host Controller or
1823 	 * Device Controller schedule:
1824 	 */
1825 	usbd_pipe_enter(xfer);
1826 }
1827 
1828 /*------------------------------------------------------------------------*
1829  *	usbd_pipe_enter - factored out code
1830  *------------------------------------------------------------------------*/
1831 void
1832 usbd_pipe_enter(struct usb_xfer *xfer)
1833 {
1834 	struct usb_endpoint *ep;
1835 
1836 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1837 
1838 	USB_BUS_LOCK(xfer->xroot->bus);
1839 
1840 	ep = xfer->endpoint;
1841 
1842 	DPRINTF("enter\n");
1843 
1844 	/* the transfer can now be cancelled */
1845 	xfer->flags_int.can_cancel_immed = 1;
1846 
1847 	/* enter the transfer */
1848 	(ep->methods->enter) (xfer);
1849 
1850 	/* check for transfer error */
1851 	if (xfer->error) {
1852 		/* some error has happened */
1853 		usbd_transfer_done(xfer, 0);
1854 		USB_BUS_UNLOCK(xfer->xroot->bus);
1855 		return;
1856 	}
1857 
1858 	/* start the transfer */
1859 	usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1860 	USB_BUS_UNLOCK(xfer->xroot->bus);
1861 }
1862 
1863 /*------------------------------------------------------------------------*
1864  *	usbd_transfer_start - start a USB transfer
1865  *
1866  * NOTE: Calling this function more than once will only
1867  *       result in a single transfer start, until the USB transfer
1868  *       completes.
1869  *------------------------------------------------------------------------*/
1870 void
1871 usbd_transfer_start(struct usb_xfer *xfer)
1872 {
1873 	if (xfer == NULL) {
1874 		/* transfer is gone */
1875 		return;
1876 	}
1877 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1878 
1879 	/* mark the USB transfer started */
1880 
1881 	if (!xfer->flags_int.started) {
1882 		/* lock the BUS lock to avoid races updating flags_int */
1883 		USB_BUS_LOCK(xfer->xroot->bus);
1884 		xfer->flags_int.started = 1;
1885 		USB_BUS_UNLOCK(xfer->xroot->bus);
1886 	}
1887 	/* check if the USB transfer callback is already transferring */
1888 
1889 	if (xfer->flags_int.transferring) {
1890 		return;
1891 	}
1892 	USB_BUS_LOCK(xfer->xroot->bus);
1893 	/* call the USB transfer callback */
1894 	usbd_callback_ss_done_defer(xfer);
1895 	USB_BUS_UNLOCK(xfer->xroot->bus);
1896 }
1897 
1898 /*------------------------------------------------------------------------*
1899  *	usbd_transfer_stop - stop a USB transfer
1900  *
1901  * NOTE: Calling this function more than once will only
1902  *       result in a single transfer stop.
1903  * NOTE: When this function returns it is not safe to free or
1904  *       reuse any DMA buffers. See "usbd_transfer_drain()".
1905  *------------------------------------------------------------------------*/
1906 void
1907 usbd_transfer_stop(struct usb_xfer *xfer)
1908 {
1909 	struct usb_endpoint *ep;
1910 
1911 	if (xfer == NULL) {
1912 		/* transfer is gone */
1913 		return;
1914 	}
1915 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1916 
1917 	/* check if the USB transfer was ever opened */
1918 
1919 	if (!xfer->flags_int.open) {
1920 		if (xfer->flags_int.started) {
1921 			/* nothing to do except clearing the "started" flag */
1922 			/* lock the BUS lock to avoid races updating flags_int */
1923 			USB_BUS_LOCK(xfer->xroot->bus);
1924 			xfer->flags_int.started = 0;
1925 			USB_BUS_UNLOCK(xfer->xroot->bus);
1926 		}
1927 		return;
1928 	}
1929 	/* try to stop the current USB transfer */
1930 
1931 	USB_BUS_LOCK(xfer->xroot->bus);
1932 	/* override any previous error */
1933 	xfer->error = USB_ERR_CANCELLED;
1934 
1935 	/*
1936 	 * Clear "open" and "started" when both private and USB lock
1937 	 * is locked so that we don't get a race updating "flags_int"
1938 	 */
1939 	xfer->flags_int.open = 0;
1940 	xfer->flags_int.started = 0;
1941 
1942 	/*
1943 	 * Check if we can cancel the USB transfer immediately.
1944 	 */
1945 	if (xfer->flags_int.transferring) {
1946 		if (xfer->flags_int.can_cancel_immed &&
1947 		    (!xfer->flags_int.did_close)) {
1948 			DPRINTF("close\n");
1949 			/*
1950 			 * The following will lead to an USB_ERR_CANCELLED
1951 			 * error code being passed to the USB callback.
1952 			 */
1953 			(xfer->endpoint->methods->close) (xfer);
1954 			/* only close once */
1955 			xfer->flags_int.did_close = 1;
1956 		} else {
1957 			/* need to wait for the next done callback */
1958 		}
1959 	} else {
1960 		DPRINTF("close\n");
1961 
1962 		/* close here and now */
1963 		(xfer->endpoint->methods->close) (xfer);
1964 
1965 		/*
1966 		 * Any additional DMA delay is done by
1967 		 * "usbd_transfer_unsetup()".
1968 		 */
1969 
1970 		/*
1971 		 * Special case. Check if we need to restart a blocked
1972 		 * endpoint.
1973 		 */
1974 		ep = xfer->endpoint;
1975 
1976 		/*
1977 		 * If the current USB transfer is completing we need
1978 		 * to start the next one:
1979 		 */
1980 		if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1981 			usb_command_wrapper(
1982 			    &ep->endpoint_q[xfer->stream_id], NULL);
1983 		}
1984 	}
1985 
1986 	USB_BUS_UNLOCK(xfer->xroot->bus);
1987 }
1988 
1989 /*------------------------------------------------------------------------*
1990  *	usbd_transfer_pending
1991  *
1992  * This function will check if an USB transfer is pending, which is a
1993  * little bit complicated!
1994  * Return values:
1995  * 0: Not pending
1996  * 1: Pending: The USB transfer will receive a callback in the future.
1997  *------------------------------------------------------------------------*/
1998 uint8_t
1999 usbd_transfer_pending(struct usb_xfer *xfer)
2000 {
2001 	struct usb_xfer_root *info;
2002 	struct usb_xfer_queue *pq;
2003 
2004 	if (xfer == NULL) {
2005 		/* transfer is gone */
2006 		return (0);
2007 	}
2008 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2009 
2010 	if (xfer->flags_int.transferring) {
2011 		/* trivial case */
2012 		return (1);
2013 	}
2014 	USB_BUS_LOCK(xfer->xroot->bus);
2015 	if (xfer->wait_queue) {
2016 		/* we are waiting on a queue somewhere */
2017 		USB_BUS_UNLOCK(xfer->xroot->bus);
2018 		return (1);
2019 	}
2020 	info = xfer->xroot;
2021 	pq = &info->done_q;
2022 
2023 	if (pq->curr == xfer) {
2024 		/* we are currently scheduled for callback */
2025 		USB_BUS_UNLOCK(xfer->xroot->bus);
2026 		return (1);
2027 	}
2028 	/* we are not pending */
2029 	USB_BUS_UNLOCK(xfer->xroot->bus);
2030 	return (0);
2031 }
2032 
2033 /*------------------------------------------------------------------------*
2034  *	usbd_transfer_drain
2035  *
2036  * This function will stop the USB transfer and wait for any
2037  * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2038  * are loaded into DMA can safely be freed or reused after that this
2039  * are loaded into DMA can safely be freed or reused after this
2040  *------------------------------------------------------------------------*/
2041 void
2042 usbd_transfer_drain(struct usb_xfer *xfer)
2043 {
2044 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2045 	    "usbd_transfer_drain can sleep!");
2046 
2047 	if (xfer == NULL) {
2048 		/* transfer is gone */
2049 		return;
2050 	}
2051 	if (xfer->xroot->xfer_mtx != &Giant) {
2052 		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2053 	}
2054 	USB_XFER_LOCK(xfer);
2055 
2056 	usbd_transfer_stop(xfer);
2057 
2058 	while (usbd_transfer_pending(xfer) ||
2059 	    xfer->flags_int.doing_callback) {
2060 
2061 		/*
2062 		 * It is allowed that the callback can drop its
2063 		 * transfer mutex. In that case checking only
2064 		 * "usbd_transfer_pending()" is not enough to tell if
2065 		 * the USB transfer is fully drained. We also need to
2066 		 * check the internal "doing_callback" flag.
2067 		 */
2068 		xfer->flags_int.draining = 1;
2069 
2070 		/*
2071 		 * Wait until the current outstanding USB
2072 		 * transfer is complete !
2073 		 */
2074 		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2075 	}
2076 	USB_XFER_UNLOCK(xfer);
2077 }
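
/*------------------------------------------------------------------------*
 * Editor's teardown sketch (not part of the original source; "sc",
 * "sc_mtx" and "MY_BULK_RD" are hypothetical names): stopping is done
 * with the driver mutex held, while draining must be done without it,
 * because "usbd_transfer_drain()" can sleep and locks the mutex
 * itself:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_stop(sc->sc_xfer[MY_BULK_RD]);
 *	mtx_unlock(&sc->sc_mtx);
 *
 *	usbd_transfer_drain(sc->sc_xfer[MY_BULK_RD]);
 *
 * After "usbd_transfer_drain()" returns, externally supplied DMA
 * buffers may be freed or reused, as stated above.
 *------------------------------------------------------------------------*/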
2078 
2079 struct usb_page_cache *
2080 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2081 {
2082 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2083 
2084 	return (&xfer->frbuffers[frindex]);
2085 }
2086 
2087 void *
2088 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2089 {
2090 	struct usb_page_search page_info;
2091 
2092 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2093 
2094 	usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2095 	return (page_info.buffer);
2096 }
2097 
2098 /*------------------------------------------------------------------------*
2099  *	usbd_xfer_get_fps_shift
2100  *
2101  * The following function is only useful for isochronous transfers. It
2102  * returns how many times the frame execution rate has been shifted
2103  * down.
2104  *
2105  * Return value:
2106  * Success: 0..3
2107  * Failure: 0
2108  *------------------------------------------------------------------------*/
2109 uint8_t
2110 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2111 {
2112 	return (xfer->fps_shift);
2113 }
2114 
2115 usb_frlength_t
2116 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2117 {
2118 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2119 
2120 	return (xfer->frlengths[frindex]);
2121 }
2122 
2123 /*------------------------------------------------------------------------*
2124  *	usbd_xfer_set_frame_data
2125  *
2126  * This function sets the pointer of the buffer that should
2127  * be loaded directly into DMA for the given USB frame. Passing "ptr"
2128  * equal to NULL while the corresponding "frlength" is greater
2129  * than zero gives undefined results!
2130  *------------------------------------------------------------------------*/
2131 void
2132 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2133     void *ptr, usb_frlength_t len)
2134 {
2135 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2136 
2137 	/* set virtual address to load and length */
2138 	xfer->frbuffers[frindex].buffer = ptr;
2139 	usbd_xfer_set_frame_len(xfer, frindex, len);
2140 }
2141 
2142 void
2143 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2144     void **ptr, int *len)
2145 {
2146 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2147 
2148 	if (ptr != NULL)
2149 		*ptr = xfer->frbuffers[frindex].buffer;
2150 	if (len != NULL)
2151 		*len = xfer->frlengths[frindex];
2152 }
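
/*------------------------------------------------------------------------*
 * Editor's sketch (not part of the original source): a callback using
 * an external buffer typically points frame 0 at a driver-owned
 * buffer before submitting. This assumes the transfer was configured
 * with the "ext_buffer" flag; "sc", "sc_rx_buf" and "MY_BUFSIZE" are
 * hypothetical names:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf,
 *		    MY_BUFSIZE);
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 *------------------------------------------------------------------------*/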
2153 
2154 /*------------------------------------------------------------------------*
2155  *	usbd_xfer_old_frame_length
2156  *
2157  * This function returns the framelength of the given frame at the
2158  * time the transfer was submitted. This function can be used to
2159  * compute the starting data pointer of the next isochronous frame
2160  * when an isochronous transfer has completed.
2161  *------------------------------------------------------------------------*/
2162 usb_frlength_t
2163 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2164 {
2165 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2166 
2167 	return (xfer->frlengths[frindex + xfer->max_frame_count]);
2168 }
2169 
2170 void
2171 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2172     int *nframes)
2173 {
2174 	if (actlen != NULL)
2175 		*actlen = xfer->actlen;
2176 	if (sumlen != NULL)
2177 		*sumlen = xfer->sumlen;
2178 	if (aframes != NULL)
2179 		*aframes = xfer->aframes;
2180 	if (nframes != NULL)
2181 		*nframes = xfer->nframes;
2182 }
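
/*------------------------------------------------------------------------*
 * Editor's sketch (not part of the original source): reading the
 * actual length in the transferred state of a callback. Only "actlen"
 * is needed here, so the other output pointers are NULL; "actlen" is
 * a local "int" in the hypothetical callback:
 *
 *	case USB_ST_TRANSFERRED:
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *		... consume "actlen" bytes of received data ...
 *		break;
 *------------------------------------------------------------------------*/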
2183 
2184 /*------------------------------------------------------------------------*
2185  *	usbd_xfer_set_frame_offset
2186  *
2187  * This function sets the frame data buffer offset relative to the beginning
2188  * of the USB DMA buffer allocated for this USB transfer.
2189  *------------------------------------------------------------------------*/
2190 void
2191 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2192     usb_frcount_t frindex)
2193 {
2194 	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2195 	    "when the USB buffer is external\n"));
2196 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2197 
2198 	/* set virtual address to load */
2199 	xfer->frbuffers[frindex].buffer =
2200 	    USB_ADD_BYTES(xfer->local_buffer, offset);
2201 }
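
/*------------------------------------------------------------------------*
 * Editor's sketch (not part of the original source): when packing
 * several packets into one multi-frame transfer, each frame is given
 * its own slice of the internally allocated buffer. The names
 * "MY_FRAME_SIZE", "nframes", "pkt_data" and "pkt_len" are
 * hypothetical:
 *
 *	for (n = 0; n != nframes; n++) {
 *		usbd_xfer_set_frame_offset(xfer, n * MY_FRAME_SIZE, n);
 *		pc = usbd_xfer_get_frame(xfer, n);
 *		usbd_copy_in(pc, 0, pkt_data[n], pkt_len[n]);
 *		usbd_xfer_set_frame_len(xfer, n, pkt_len[n]);
 *	}
 *	usbd_xfer_set_frames(xfer, nframes);
 *	usbd_transfer_submit(xfer);
 *------------------------------------------------------------------------*/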
2202 
2203 void
2204 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2205 {
2206 	xfer->interval = i;
2207 }
2208 
2209 void
2210 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2211 {
2212 	xfer->timeout = t;
2213 }
2214 
2215 void
2216 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2217 {
2218 	xfer->nframes = n;
2219 }
2220 
2221 usb_frcount_t
2222 usbd_xfer_max_frames(struct usb_xfer *xfer)
2223 {
2224 	return (xfer->max_frame_count);
2225 }
2226 
2227 usb_frlength_t
2228 usbd_xfer_max_len(struct usb_xfer *xfer)
2229 {
2230 	return (xfer->max_data_length);
2231 }
2232 
2233 usb_frlength_t
2234 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2235 {
2236 	return (xfer->max_frame_size);
2237 }
2238 
2239 void
2240 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2241     usb_frlength_t len)
2242 {
2243 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2244 
2245 	xfer->frlengths[frindex] = len;
2246 }
2247 
2248 /*------------------------------------------------------------------------*
2249  *	usb_callback_proc - factored out code
2250  *
2251  * This function performs USB callbacks.
2252  *------------------------------------------------------------------------*/
2253 static void
2254 usb_callback_proc(struct usb_proc_msg *_pm)
2255 {
2256 	struct usb_done_msg *pm = (void *)_pm;
2257 	struct usb_xfer_root *info = pm->xroot;
2258 
2259 	/* Change locking order */
2260 	USB_BUS_UNLOCK(info->bus);
2261 
2262 	/*
2263 	 * We exploit the fact that the mutex is the same for all
2264 	 * callbacks that will be called from this thread:
2265 	 */
2266 	USB_MTX_LOCK(info->xfer_mtx);
2267 	USB_BUS_LOCK(info->bus);
2268 
2269 	/* Continue where we lost track */
2270 	usb_command_wrapper(&info->done_q,
2271 	    info->done_q.curr);
2272 
2273 	USB_MTX_UNLOCK(info->xfer_mtx);
2274 }
2275 
2276 /*------------------------------------------------------------------------*
2277  *	usbd_callback_ss_done_defer
2278  *
2279  * This function will defer the start, stop and done callback to the
2280  * correct thread.
2281  *------------------------------------------------------------------------*/
2282 static void
2283 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2284 {
2285 	struct usb_xfer_root *info = xfer->xroot;
2286 	struct usb_xfer_queue *pq = &info->done_q;
2287 
2288 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2289 
2290 	if (pq->curr != xfer) {
2291 		usbd_transfer_enqueue(pq, xfer);
2292 	}
2293 	if (!pq->recurse_1) {
2294 
2295 		/*
2296 	         * We have to postpone the callback, because we would
2297 	         * get a Lock Order Reversal, LOR, if we tried to
2298 	         * proceed here!
2299 	         */
2300 		(void) usb_proc_msignal(info->done_p,
2301 		    &info->done_m[0], &info->done_m[1]);
2302 	} else {
2303 		/* clear second recurse flag */
2304 		pq->recurse_2 = 0;
2305 	}
2306 	return;
2307 
2308 }
2309 
2310 /*------------------------------------------------------------------------*
2311  *	usbd_callback_wrapper
2312  *
2313  * This is a wrapper for USB callbacks. This wrapper does some
2314  * auto-magic things like figuring out if we can call the callback
2315  * directly from the current context or if we need to wakeup the
2316  * interrupt process.
2317  *------------------------------------------------------------------------*/
2318 static void
2319 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2320 {
2321 	struct usb_xfer *xfer = pq->curr;
2322 	struct usb_xfer_root *info = xfer->xroot;
2323 
2324 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2325 	if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2326 	    USB_IN_POLLING_MODE_FUNC() == 0) {
2327 		/*
2328 	       	 * Cases that end up here:
2329 		 *
2330 		 * 5) HW interrupt done callback or other source.
2331 		 * 6) HW completed transfer during callback
2332 		 */
2333 		DPRINTFN(3, "case 5 and 6\n");
2334 
2335 		/*
2336 	         * We have to postpone the callback, because we would
2337 	         * get a Lock Order Reversal, LOR, if we tried to
2338 	         * proceed!
2339 		 *
2340 		 * Postponing the callback also ensures that other USB
2341 		 * transfer queues get a chance.
2342 	         */
2343 		(void) usb_proc_msignal(info->done_p,
2344 		    &info->done_m[0], &info->done_m[1]);
2345 		return;
2346 	}
2347 	/*
2348 	 * Cases that end up here:
2349 	 *
2350 	 * 1) We are starting a transfer
2351 	 * 2) We are prematurely calling back a transfer
2352 	 * 3) We are stopping a transfer
2353 	 * 4) We are doing an ordinary callback
2354 	 */
2355 	DPRINTFN(3, "case 1-4\n");
2356 	/* get next USB transfer in the queue */
2357 	info->done_q.curr = NULL;
2358 
2359 	/* set flag in case of drain */
2360 	xfer->flags_int.doing_callback = 1;
2361 
2362 	USB_BUS_UNLOCK(info->bus);
2363 	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2364 
2365 	/* set correct USB state for callback */
2366 	if (!xfer->flags_int.transferring) {
2367 		xfer->usb_state = USB_ST_SETUP;
2368 		if (!xfer->flags_int.started) {
2369 			/* we got stopped before we even got started */
2370 			USB_BUS_LOCK(info->bus);
2371 			goto done;
2372 		}
2373 	} else {
2374 
2375 		if (usbd_callback_wrapper_sub(xfer)) {
2376 			/* the callback has been deferred */
2377 			USB_BUS_LOCK(info->bus);
2378 			goto done;
2379 		}
2380 #if USB_HAVE_POWERD
2381 		/* decrement power reference */
2382 		usbd_transfer_power_ref(xfer, -1);
2383 #endif
2384 		xfer->flags_int.transferring = 0;
2385 
2386 		if (xfer->error) {
2387 			xfer->usb_state = USB_ST_ERROR;
2388 		} else {
2389 			/* set transferred state */
2390 			xfer->usb_state = USB_ST_TRANSFERRED;
2391 #if USB_HAVE_BUSDMA
2392 			/* sync DMA memory, if any */
2393 			if (xfer->flags_int.bdma_enable &&
2394 			    (!xfer->flags_int.bdma_no_post_sync)) {
2395 				usb_bdma_post_sync(xfer);
2396 			}
2397 #endif
2398 		}
2399 	}
2400 
2401 #if USB_HAVE_PF
2402 	if (xfer->usb_state != USB_ST_SETUP) {
2403 		USB_BUS_LOCK(info->bus);
2404 		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2405 		USB_BUS_UNLOCK(info->bus);
2406 	}
2407 #endif
2408 	/* call processing routine */
2409 	(xfer->callback) (xfer, xfer->error);
2410 
2411 	/* pickup the USB mutex again */
2412 	USB_BUS_LOCK(info->bus);
2413 
2414 	/*
2415 	 * Check if we got started after we got cancelled, but
2416 	 * before we managed to do the callback.
2417 	 */
2418 	if ((!xfer->flags_int.open) &&
2419 	    (xfer->flags_int.started) &&
2420 	    (xfer->usb_state == USB_ST_ERROR)) {
2421 		/* clear flag in case of drain */
2422 		xfer->flags_int.doing_callback = 0;
2423 		/* try to loop, but not recursively */
2424 		usb_command_wrapper(&info->done_q, xfer);
2425 		return;
2426 	}
2427 
2428 done:
2429 	/* clear flag in case of drain */
2430 	xfer->flags_int.doing_callback = 0;
2431 
2432 	/*
2433 	 * Check if we are draining.
2434 	 */
2435 	if (xfer->flags_int.draining &&
2436 	    (!xfer->flags_int.transferring)) {
2437 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2438 		xfer->flags_int.draining = 0;
2439 		cv_broadcast(&info->cv_drain);
2440 	}
2441 
2442 	/* do the next callback, if any */
2443 	usb_command_wrapper(&info->done_q,
2444 	    info->done_q.curr);
2445 }
2446 
2447 /*------------------------------------------------------------------------*
2448  *	usb_dma_delay_done_cb
2449  *
2450  * This function is called when the DMA delay has been executed, and
2451  * will make sure that the callback is called to complete the USB
2452  * transfer. This code path is usually only used when there is an USB
2453  * error like USB_ERR_CANCELLED.
2454  *------------------------------------------------------------------------*/
2455 void
2456 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2457 {
2458 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2459 
2460 	DPRINTFN(3, "Completed %p\n", xfer);
2461 
2462 	/* queue callback for execution, again */
2463 	usbd_transfer_done(xfer, 0);
2464 }
2465 
2466 /*------------------------------------------------------------------------*
2467  *	usbd_transfer_dequeue
2468  *
2469  *  - This function is used to remove an USB transfer from a USB
2470  *  transfer queue.
2471  *
2472  *  - This function can be called multiple times in a row.
2473  *------------------------------------------------------------------------*/
2474 void
2475 usbd_transfer_dequeue(struct usb_xfer *xfer)
2476 {
2477 	struct usb_xfer_queue *pq;
2478 
2479 	pq = xfer->wait_queue;
2480 	if (pq) {
2481 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2482 		xfer->wait_queue = NULL;
2483 	}
2484 }
2485 
2486 /*------------------------------------------------------------------------*
2487  *	usbd_transfer_enqueue
2488  *
2489  *  - This function is used to insert an USB transfer into a USB
2490  *  transfer queue.
2491  *
2492  *  - This function can be called multiple times in a row.
2493  *------------------------------------------------------------------------*/
2494 void
2495 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2496 {
2497 	/*
2498 	 * Insert the USB transfer into the queue, if it is not
2499 	 * already on a USB transfer queue:
2500 	 */
2501 	if (xfer->wait_queue == NULL) {
2502 		xfer->wait_queue = pq;
2503 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2504 	}
2505 }
2506 
2507 /*------------------------------------------------------------------------*
2508  *	usbd_transfer_done
2509  *
2510  *  - This function is used to remove an USB transfer from the busdma,
2511  *  pipe or interrupt queue.
2512  *
2513  *  - This function is used to queue the USB transfer on the done
2514  *  queue.
2515  *
2516  *  - This function is used to stop any USB transfer timeouts.
2517  *------------------------------------------------------------------------*/
2518 void
2519 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2520 {
2521 	struct usb_xfer_root *info = xfer->xroot;
2522 
2523 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2524 
2525 	DPRINTF("err=%s\n", usbd_errstr(error));
2526 
2527 	/*
2528 	 * If we are not transferring then just return.
2529 	 * This can happen during transfer cancel.
2530 	 */
2531 	if (!xfer->flags_int.transferring) {
2532 		DPRINTF("not transferring\n");
2533 		/* end of control transfer, if any */
2534 		xfer->flags_int.control_act = 0;
2535 		return;
2536 	}
2537 	/* only set transfer error, if not already set */
2538 	if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2539 		xfer->error = error;
2540 
2541 	/* stop any callouts */
2542 	usb_callout_stop(&xfer->timeout_handle);
2543 
2544 	/*
2545 	 * If we are waiting on a queue, just remove the USB transfer
2546 	 * from the queue, if any. We should have the required locks
2547 	 * locked to do the remove when this function is called.
2548 	 */
2549 	usbd_transfer_dequeue(xfer);
2550 
2551 #if USB_HAVE_BUSDMA
2552 	if (mtx_owned(info->xfer_mtx)) {
2553 		struct usb_xfer_queue *pq;
2554 
2555 		/*
2556 		 * If the private USB lock is not locked, then we assume
2557 		 * that the BUS-DMA load stage has been passed:
2558 		 */
2559 		pq = &info->dma_q;
2560 
2561 		if (pq->curr == xfer) {
2562 			/* start the next BUS-DMA load, if any */
2563 			usb_command_wrapper(pq, NULL);
2564 		}
2565 	}
2566 #endif
2567 	/* keep some statistics */
2568 	if (xfer->error) {
2569 		info->bus->stats_err.uds_requests
2570 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2571 	} else {
2572 		info->bus->stats_ok.uds_requests
2573 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2574 	}
2575 
2576 	/* call the USB transfer callback */
2577 	usbd_callback_ss_done_defer(xfer);
2578 }
2579 
2580 /*------------------------------------------------------------------------*
2581  *	usbd_transfer_start_cb
2582  *
2583  * This function is called to start the USB transfer when
2584  * "xfer->interval" is greater than zero, and the endpoint type is
2585  * BULK or CONTROL.
2586  *------------------------------------------------------------------------*/
2587 static void
2588 usbd_transfer_start_cb(void *arg)
2589 {
2590 	struct usb_xfer *xfer = arg;
2591 	struct usb_endpoint *ep = xfer->endpoint;
2592 
2593 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2594 
2595 	DPRINTF("start\n");
2596 
2597 #if USB_HAVE_PF
2598 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2599 #endif
2600 
2601 	/* the transfer can now be cancelled */
2602 	xfer->flags_int.can_cancel_immed = 1;
2603 
2604 	/* start USB transfer, if no error */
2605 	if (xfer->error == 0)
2606 		(ep->methods->start) (xfer);
2607 
2608 	/* check for transfer error */
2609 	if (xfer->error) {
2610 		/* some error has happened */
2611 		usbd_transfer_done(xfer, 0);
2612 	}
2613 }
2614 
2615 /*------------------------------------------------------------------------*
2616  *	usbd_xfer_set_stall
2617  *
2618  * This function is used to set the stall flag outside the
2619  * callback. This function is NULL safe.
2620  *------------------------------------------------------------------------*/
2621 void
2622 usbd_xfer_set_stall(struct usb_xfer *xfer)
2623 {
2624 	if (xfer == NULL) {
2625 		/* tearing down */
2626 		return;
2627 	}
2628 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2629 
2630 	/* avoid any races by locking the USB mutex */
2631 	USB_BUS_LOCK(xfer->xroot->bus);
2632 	xfer->flags.stall_pipe = 1;
2633 	USB_BUS_UNLOCK(xfer->xroot->bus);
2634 }
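
/*------------------------------------------------------------------------*
 * Editor's sketch (not part of the original source): forcing a pipe
 * reset from outside the callback, for example after a device error.
 * The stall request is then processed by "usbd_pipe_start()" the next
 * time the transfer runs; "sc", "sc_mtx" and "MY_BULK_WR" are
 * hypothetical names:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_xfer_set_stall(sc->sc_xfer[MY_BULK_WR]);
 *	usbd_transfer_start(sc->sc_xfer[MY_BULK_WR]);
 *	mtx_unlock(&sc->sc_mtx);
 *------------------------------------------------------------------------*/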
2635 
2636 int
2637 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2638 {
2639 	return (xfer->endpoint->is_stalled);
2640 }
2641 
2642 /*------------------------------------------------------------------------*
2643  *	usbd_transfer_clear_stall
2644  *
2645  * This function is used to clear the stall flag outside the
2646  * callback. This function is NULL safe.
2647  *------------------------------------------------------------------------*/
2648 void
2649 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2650 {
2651 	if (xfer == NULL) {
2652 		/* tearing down */
2653 		return;
2654 	}
2655 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2656 
2657 	/* avoid any races by locking the USB mutex */
2658 	USB_BUS_LOCK(xfer->xroot->bus);
2659 
2660 	xfer->flags.stall_pipe = 0;
2661 
2662 	USB_BUS_UNLOCK(xfer->xroot->bus);
2663 }
2664 
2665 /*------------------------------------------------------------------------*
2666  *	usbd_pipe_start
2667  *
2668  * This function is used to add an USB transfer to the pipe transfer list.
2669  *------------------------------------------------------------------------*/
2670 void
2671 usbd_pipe_start(struct usb_xfer_queue *pq)
2672 {
2673 	struct usb_endpoint *ep;
2674 	struct usb_xfer *xfer;
2675 	uint8_t type;
2676 
2677 	xfer = pq->curr;
2678 	ep = xfer->endpoint;
2679 
2680 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2681 
2682 	/*
2683 	 * If the endpoint is already stalled we do nothing !
2684 	 */
2685 	if (ep->is_stalled) {
2686 		return;
2687 	}
2688 	/*
2689 	 * Check if we are supposed to stall the endpoint:
2690 	 */
2691 	if (xfer->flags.stall_pipe) {
2692 		struct usb_device *udev;
2693 		struct usb_xfer_root *info;
2694 
2695 		/* clear stall command */
2696 		xfer->flags.stall_pipe = 0;
2697 
2698 		/* get pointer to USB device */
2699 		info = xfer->xroot;
2700 		udev = info->udev;
2701 
2702 		/*
2703 		 * Only stall BULK and INTERRUPT endpoints.
2704 		 */
2705 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2706 		if ((type == UE_BULK) ||
2707 		    (type == UE_INTERRUPT)) {
2708 			uint8_t did_stall;
2709 
2710 			did_stall = 1;
2711 
2712 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2713 				(udev->bus->methods->set_stall) (
2714 				    udev, ep, &did_stall);
2715 			} else if (udev->ctrl_xfer[1]) {
2716 				info = udev->ctrl_xfer[1]->xroot;
2717 				usb_proc_msignal(
2718 				    USB_BUS_CS_PROC(info->bus),
2719 				    &udev->cs_msg[0], &udev->cs_msg[1]);
2720 			} else {
2721 				/* should not happen */
2722 				DPRINTFN(0, "No stall handler\n");
2723 			}
2724 			/*
2725 			 * Check if we should stall. Some USB hardware
2726 			 * handles set- and clear-stall in hardware.
2727 			 */
2728 			if (did_stall) {
2729 				/*
2730 				 * The transfer will be continued when
2731 				 * the clear-stall control endpoint
2732 				 * message is received.
2733 				 */
2734 				ep->is_stalled = 1;
2735 				return;
2736 			}
2737 		} else if (type == UE_ISOCHRONOUS) {
2738 
2739 			/*
2740 			 * Make sure any FIFO overflow or other FIFO
2741 			 * error conditions go away by resetting the
2742 			 * endpoint FIFO through the clear stall
2743 			 * method.
2744 			 */
2745 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2746 				(udev->bus->methods->clear_stall) (udev, ep);
2747 			}
2748 		}
2749 	}
2750 	/* Set or clear stall complete - special case */
2751 	if (xfer->nframes == 0) {
2752 		/* we are complete */
2753 		xfer->aframes = 0;
2754 		usbd_transfer_done(xfer, 0);
2755 		return;
2756 	}
2757 	/*
2758 	 * Handled cases:
2759 	 *
2760 	 * 1) Start the first transfer queued.
2761 	 *
2762 	 * 2) Re-start the current USB transfer.
2763 	 */
2764 	/*
2765 	 * Check if there should be any
2766 	 * pre transfer start delay:
2767 	 * pre-transfer start delay:
2768 	if (xfer->interval > 0) {
2769 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2770 		if ((type == UE_BULK) ||
2771 		    (type == UE_CONTROL)) {
2772 			usbd_transfer_timeout_ms(xfer,
2773 			    &usbd_transfer_start_cb,
2774 			    xfer->interval);
2775 			return;
2776 		}
2777 	}
2778 	DPRINTF("start\n");
2779 
2780 #if USB_HAVE_PF
2781 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2782 #endif
2783 	/* the transfer can now be cancelled */
2784 	xfer->flags_int.can_cancel_immed = 1;
2785 
2786 	/* start USB transfer, if no error */
2787 	if (xfer->error == 0)
2788 		(ep->methods->start) (xfer);
2789 
2790 	/* check for transfer error */
2791 	if (xfer->error) {
2792 		/* some error has happened */
2793 		usbd_transfer_done(xfer, 0);
2794 	}
2795 }
2796 
2797 /*------------------------------------------------------------------------*
2798  *	usbd_transfer_timeout_ms
2799  *
2800  * This function is used to setup a timeout on the given USB
2801  * transfer. The callback given by "cb" will get called after "ms"
2802  * milliseconds, unless the timeout is stopped before that.
2803  *------------------------------------------------------------------------*/
2804 void
2805 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2806     void (*cb) (void *arg), usb_timeout_t ms)
2807 {
2808 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2809 
2810 	/* defer delay */
2811 	usb_callout_reset(&xfer->timeout_handle,
2812 	    USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2813 }
2814 
2815 /*------------------------------------------------------------------------*
2816  *	usbd_callback_wrapper_sub
2817  *
2818  *  - This function will update variables in an USB transfer after
2819  *  that the USB transfer is complete.
2820  *  the USB transfer is complete.
2821  *  - This function is used to start the next USB transfer on the
2822  *  ep transfer queue, if any.
2823  *
2824  * NOTE: In some special cases the USB transfer will not be removed from
2825  * the pipe queue, but remain first. To enforce USB transfer removal call
2826  * the pipe queue, but will remain first in it. To enforce removal, the
2827  * transfer error must be set to "USB_ERR_CANCELLED".
2828  * Return values:
2829  * 0: Success.
2830  * Else: The callback has been deferred.
2831  *------------------------------------------------------------------------*/
2832 static uint8_t
2833 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2834 {
2835 	struct usb_endpoint *ep;
2836 	struct usb_bus *bus;
2837 	usb_frcount_t x;
2838 
2839 	bus = xfer->xroot->bus;
2840 
2841 	if ((!xfer->flags_int.open) &&
2842 	    (!xfer->flags_int.did_close)) {
2843 		DPRINTF("close\n");
2844 		USB_BUS_LOCK(bus);
2845 		(xfer->endpoint->methods->close) (xfer);
2846 		USB_BUS_UNLOCK(bus);
2847 		/* only close once */
2848 		xfer->flags_int.did_close = 1;
2849 		return (1);		/* wait for new callback */
2850 	}
2851 	/*
2852 	 * If we have a non-hardware induced error we
2853 	 * need to do the DMA delay!
2854 	 */
2855 	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2856 	    (xfer->error == USB_ERR_CANCELLED ||
2857 	    xfer->error == USB_ERR_TIMEOUT ||
2858 	    bus->methods->start_dma_delay != NULL)) {
2859 
2860 		usb_timeout_t temp;
2861 
2862 		/* only delay once */
2863 		xfer->flags_int.did_dma_delay = 1;
2864 
2865 		/* we can not cancel this delay */
2866 		xfer->flags_int.can_cancel_immed = 0;
2867 
2868 		temp = usbd_get_dma_delay(xfer->xroot->udev);
2869 
2870 		DPRINTFN(3, "DMA delay, %u ms, "
2871 		    "on %p\n", temp, xfer);
2872 
2873 		if (temp != 0) {
2874 			USB_BUS_LOCK(bus);
2875 			/*
2876 			 * Some hardware solutions have dedicated
2877 			 * events when it is safe to free DMA'ed
2878 			 * memory. For the other hardware platforms we
2879 			 * use a static delay.
2880 			 */
2881 			if (bus->methods->start_dma_delay != NULL) {
2882 				(bus->methods->start_dma_delay) (xfer);
2883 			} else {
2884 				usbd_transfer_timeout_ms(xfer,
2885 				    (void (*)(void *))&usb_dma_delay_done_cb,
2886 				    temp);
2887 			}
2888 			USB_BUS_UNLOCK(bus);
2889 			return (1);	/* wait for new callback */
2890 		}
2891 	}
2892 	/* check actual number of frames */
2893 	if (xfer->aframes > xfer->nframes) {
2894 		if (xfer->error == 0) {
2895 			panic("%s: actual number of frames, %d, is "
2896 			    "greater than initial number of frames, %d\n",
2897 			    __FUNCTION__, xfer->aframes, xfer->nframes);
2898 		} else {
2899 			/* just set some valid value */
2900 			xfer->aframes = xfer->nframes;
2901 		}
2902 	}
2903 	/* compute actual length */
2904 	xfer->actlen = 0;
2905 
2906 	for (x = 0; x != xfer->aframes; x++) {
2907 		xfer->actlen += xfer->frlengths[x];
2908 	}
2909 
2910 	/*
2911 	 * Frames that were not transferred get zero actual length in
2912 	 * case the USB device driver does not check the actual number
2913 	 * of frames transferred, "xfer->aframes":
2914 	 */
2915 	for (; x < xfer->nframes; x++) {
2916 		usbd_xfer_set_frame_len(xfer, x, 0);
2917 	}
2918 
2919 	/* check actual length */
2920 	if (xfer->actlen > xfer->sumlen) {
2921 		if (xfer->error == 0) {
2922 			panic("%s: actual length, %d, is greater than "
2923 			    "initial length, %d\n",
2924 			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2925 		} else {
2926 			/* just set some valid value */
2927 			xfer->actlen = xfer->sumlen;
2928 		}
2929 	}
2930 	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2931 	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2932 	    xfer->aframes, xfer->nframes);
2933 
2934 	if (xfer->error) {
2935 		/* end of control transfer, if any */
2936 		xfer->flags_int.control_act = 0;
2937 
2938 #if USB_HAVE_TT_SUPPORT
2939 		switch (xfer->error) {
2940 		case USB_ERR_NORMAL_COMPLETION:
2941 		case USB_ERR_SHORT_XFER:
2942 		case USB_ERR_STALLED:
2943 		case USB_ERR_CANCELLED:
2944 			/* nothing to do */
2945 			break;
2946 		default:
2947 			/* try to reset the TT, if any */
2948 			USB_BUS_LOCK(bus);
2949 			uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2950 			USB_BUS_UNLOCK(bus);
2951 			break;
2952 		}
2953 #endif
2954 		/* check if we should block the execution queue */
2955 		if ((xfer->error != USB_ERR_CANCELLED) &&
2956 		    (xfer->flags.pipe_bof)) {
2957 			DPRINTFN(2, "xfer=%p: Block On Failure "
2958 			    "on endpoint=%p\n", xfer, xfer->endpoint);
2959 			goto done;
2960 		}
2961 	} else {
2962 		/* check for short transfers */
2963 		if (xfer->actlen < xfer->sumlen) {
2964 
2965 			/* end of control transfer, if any */
2966 			xfer->flags_int.control_act = 0;
2967 
2968 			if (!xfer->flags_int.short_xfer_ok) {
2969 				xfer->error = USB_ERR_SHORT_XFER;
2970 				if (xfer->flags.pipe_bof) {
2971 					DPRINTFN(2, "xfer=%p: Block On Failure on "
2972 					    "Short Transfer on endpoint %p.\n",
2973 					    xfer, xfer->endpoint);
2974 					goto done;
2975 				}
2976 			}
2977 		} else {
2978 			/*
2979 			 * Check if we are in the middle of a
2980 			 * control transfer:
2981 			 */
2982 			if (xfer->flags_int.control_act) {
2983 				DPRINTFN(5, "xfer=%p: Control transfer "
2984 				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2985 				goto done;
2986 			}
2987 		}
2988 	}
2989 
2990 	ep = xfer->endpoint;
2991 
2992 	/*
2993 	 * If the current USB transfer is completing we need to start the
2994 	 * next one:
2995 	 */
2996 	USB_BUS_LOCK(bus);
2997 	if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2998 		usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2999 
3000 		if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
3001 		    TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3002 			/* there is another USB transfer waiting */
3003 		} else {
3004 			/* this is the last USB transfer */
3005 			/* clear isochronous sync flag */
3006 			xfer->endpoint->is_synced = 0;
3007 		}
3008 	}
3009 	USB_BUS_UNLOCK(bus);
3010 done:
3011 	return (0);
3012 }
3013 
3014 /*------------------------------------------------------------------------*
3015  *	usb_command_wrapper
3016  *
3017  * This function is used to execute commands non-recursively on an USB
3018  * transfer.
3019  *------------------------------------------------------------------------*/
3020 void
3021 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3022 {
3023 	if (xfer) {
3024 		/*
3025 		 * If the transfer is not already processing,
3026 		 * queue it!
3027 		 */
3028 		if (pq->curr != xfer) {
3029 			usbd_transfer_enqueue(pq, xfer);
3030 			if (pq->curr != NULL) {
3031 				/* something is already processing */
3032 				DPRINTFN(6, "busy %p\n", pq->curr);
3033 				return;
3034 			}
3035 		}
3036 	} else {
3037 		/* Get next element in queue */
3038 		pq->curr = NULL;
3039 	}
3040 
3041 	if (!pq->recurse_1) {
3042 
3043 		/* clear third recurse flag */
3044 		pq->recurse_3 = 0;
3045 
3046 		do {
3047 			/* set two first recurse flags */
3048 			pq->recurse_1 = 1;
3049 			pq->recurse_2 = 1;
3050 
3051 			if (pq->curr == NULL) {
3052 				xfer = TAILQ_FIRST(&pq->head);
3053 				if (xfer) {
3054 					TAILQ_REMOVE(&pq->head, xfer,
3055 					    wait_entry);
3056 					xfer->wait_queue = NULL;
3057 					pq->curr = xfer;
3058 				} else {
3059 					break;
3060 				}
3061 			}
3062 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3063 			(pq->command) (pq);
3064 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3065 
3066 			/*
3067 			 * Set third recurse flag to indicate
3068 			 * recursion happened:
3069 			 */
3070 			pq->recurse_3 = 1;
3071 
3072 		} while (!pq->recurse_2);
3073 
3074 		/* clear first recurse flag */
3075 		pq->recurse_1 = 0;
3076 
3077 	} else {
3078 		/* clear second recurse flag */
3079 		pq->recurse_2 = 0;
3080 	}
3081 }
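
/*------------------------------------------------------------------------*
 * Editor's note (not part of the original source): the "recurse_1"
 * and "recurse_2" flags above turn recursive invocations into extra
 * loop iterations. A minimal sketch of the same handshake, with
 * generic names and without the "recurse_3" bookkeeping, looks like
 * this:
 *
 *	if (q->active) {
 *		q->done = 0;		-- like clearing "recurse_2"
 *		return;			-- the outer loop runs once more
 *	}
 *	q->active = 1;			-- like setting "recurse_1"
 *	do {
 *		q->done = 1;		-- like setting "recurse_2"
 *		process_current(q);	-- may recurse into this code
 *	} while (q->done == 0);
 *	q->active = 0;
 *------------------------------------------------------------------------*/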
3082 
3083 /*------------------------------------------------------------------------*
3084  *	usbd_ctrl_transfer_setup
3085  *
3086  * This function is used to setup the default USB control endpoint
3087  * transfer.
3088  *------------------------------------------------------------------------*/
3089 void
3090 usbd_ctrl_transfer_setup(struct usb_device *udev)
3091 {
3092 	struct usb_xfer *xfer;
3093 	uint8_t no_resetup;
3094 	uint8_t iface_index;
3095 
3096 	/* check for root HUB */
3097 	if (udev->parent_hub == NULL)
3098 		return;
3099 repeat:
3100 
3101 	xfer = udev->ctrl_xfer[0];
3102 	if (xfer) {
3103 		USB_XFER_LOCK(xfer);
3104 		no_resetup =
3105 		    ((xfer->address == udev->address) &&
3106 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3107 		    udev->ddesc.bMaxPacketSize));
3108 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3109 			if (no_resetup) {
3110 				/*
3111 				 * NOTE: checking "xfer->address" and
3112 				 * starting the USB transfer must be
3113 				 * atomic!
3114 				 */
3115 				usbd_transfer_start(xfer);
3116 			}
3117 		}
3118 		USB_XFER_UNLOCK(xfer);
3119 	} else {
3120 		no_resetup = 0;
3121 	}
3122 
3123 	if (no_resetup) {
3124 		/*
3125 	         * All parameters are exactly the same as before.
3126 	         * Just return.
3127 	         */
3128 		return;
3129 	}
3130 	/*
3131 	 * Update wMaxPacketSize for the default control endpoint:
3132 	 */
3133 	udev->ctrl_ep_desc.wMaxPacketSize[0] =
3134 	    udev->ddesc.bMaxPacketSize;
3135 
3136 	/*
3137 	 * Unsetup any existing USB transfer:
3138 	 */
3139 	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3140 
3141 	/*
3142 	 * Reset clear stall error counter.
3143 	 */
3144 	udev->clear_stall_errors = 0;
3145 
3146 	/*
3147 	 * Try to setup a new USB transfer for the
3148 	 * default control endpoint:
3149 	 */
3150 	iface_index = 0;
3151 	if (usbd_transfer_setup(udev, &iface_index,
3152 	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3153 	    &udev->device_mtx)) {
3154 		DPRINTFN(0, "could not setup default "
3155 		    "USB transfer\n");
3156 	} else {
3157 		goto repeat;
3158 	}
3159 }
3160 
3161 /*------------------------------------------------------------------------*
3162  *	usbd_clear_stall_locked - factored out code
3163  *
3164  * NOTE: the intention of this function is not to reset the hardware
3165  * data toggle.
3166  *------------------------------------------------------------------------*/
3167 void
3168 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3169 {
3170 	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3171 
3172 	/* check that we have a valid case */
3173 	if (udev->flags.usb_mode == USB_MODE_HOST &&
3174 	    udev->parent_hub != NULL &&
3175 	    udev->bus->methods->clear_stall != NULL &&
3176 	    ep->methods != NULL) {
3177 		(udev->bus->methods->clear_stall) (udev, ep);
3178 	}
3179 }
3180 
3181 /*------------------------------------------------------------------------*
3182  *	usbd_clear_data_toggle - factored out code
3183  *
3184  * NOTE: the intention of this function is not to reset the hardware
3185  * data toggle on the USB device side.
3186  *------------------------------------------------------------------------*/
3187 void
3188 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3189 {
3190 	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3191 
3192 	USB_BUS_LOCK(udev->bus);
3193 	ep->toggle_next = 0;
3194 	/* some hardware needs a callback to clear the data toggle */
3195 	usbd_clear_stall_locked(udev, ep);
3196 	USB_BUS_UNLOCK(udev->bus);
3197 }
3198 
3199 /*------------------------------------------------------------------------*
3200  *	usbd_clear_stall_callback - factored out clear stall callback
3201  *
3202  * Input parameters:
3203  *  xfer1: Clear Stall Control Transfer
3204  *  xfer2: Stalled USB Transfer
3205  *
3206  * This function is NULL safe.
3207  *
3208  * Return values:
3209  *   0: In progress
3210  *   Else: Finished
3211  *
3212  * Clear stall config example:
3213  *
3214  * static const struct usb_config my_clearstall =  {
3215  *	.type = UE_CONTROL,
3216  *	.endpoint = 0,
3217  *	.direction = UE_DIR_ANY,
3218  *	.interval = 50, //50 milliseconds
3219  *	.bufsize = sizeof(struct usb_device_request),
3220  *	.timeout = 1000, //1.000 seconds
3221  *	.callback = &my_clear_stall_callback, // **
3222  *	.usb_mode = USB_MODE_HOST,
3223  * };
3224  *
3225  * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3226  * passing the correct parameters.
3227  *------------------------------------------------------------------------*/
3228 uint8_t
3229 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3230     struct usb_xfer *xfer2)
3231 {
3232 	struct usb_device_request req;
3233 
3234 	if (xfer2 == NULL) {
3235 		/* looks like we are tearing down */
3236 		DPRINTF("NULL input parameter\n");
3237 		return (0);
3238 	}
3239 	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3240 	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3241 
3242 	switch (USB_GET_STATE(xfer1)) {
3243 	case USB_ST_SETUP:
3244 
3245 		/*
3246 		 * pre-clear the data toggle to DATA0 ("umass.c" and
3247 		 * "ata-usb.c" depends on this)
3248 		 * "ata-usb.c" depend on this)
3249 
3250 		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3251 
3252 		/* setup a clear-stall packet */
3253 
3254 		req.bmRequestType = UT_WRITE_ENDPOINT;
3255 		req.bRequest = UR_CLEAR_FEATURE;
3256 		USETW(req.wValue, UF_ENDPOINT_HALT);
3257 		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3258 		req.wIndex[1] = 0;
3259 		USETW(req.wLength, 0);
3260 
3261 		/*
3262 		 * "usbd_transfer_setup_sub()" will ensure that
3263 		 * we have sufficient room in the buffer for
3264 		 * the request structure!
3265 		 */
3266 
3267 		/* copy in the transfer */
3268 
3269 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3270 
3271 		/* set length */
3272 		xfer1->frlengths[0] = sizeof(req);
3273 		xfer1->nframes = 1;
3274 
3275 		usbd_transfer_submit(xfer1);
3276 		return (0);
3277 
3278 	case USB_ST_TRANSFERRED:
3279 		break;
3280 
3281 	default:			/* Error */
3282 		if (xfer1->error == USB_ERR_CANCELLED) {
3283 			return (0);
3284 		}
3285 		break;
3286 	}
3287 	return (1);			/* Clear Stall Finished */
3288 }
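
/*------------------------------------------------------------------------*
 * Editor's sketch (not part of the original source): a typical
 * "my_clear_stall_callback" matching the config example above. Both
 * transfers must have been set up with the same mutex; "struct
 * my_softc", "sc_xfer" and "MY_BULK_RD" are hypothetical names:
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *		struct usb_xfer *xfer_stalled = sc->sc_xfer[MY_BULK_RD];
 *
 *		if (usbd_clear_stall_callback(xfer, xfer_stalled)) {
 *			usbd_transfer_start(xfer_stalled);
 *		}
 *	}
 *------------------------------------------------------------------------*/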
3289 
3290 /*------------------------------------------------------------------------*
3291  *	usbd_transfer_poll
3292  *
3293  * The following function gets called from the USB keyboard driver and
3294  * UMASS when the system has paniced.
3295  * UMASS when the system has panicked.
3296  * NOTE: It is currently not possible to resume normal operation on
3297  * the USB controller which has been polled, due to clearing of the
3298  * "up_dsleep" and "up_msleep" flags.
3299  *------------------------------------------------------------------------*/
3300 void
3301 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3302 {
3303 	struct usb_xfer *xfer;
3304 	struct usb_xfer_root *xroot;
3305 	struct usb_device *udev;
3306 	struct usb_proc_msg *pm;
3307 	struct usb_bus *bus;
3308 	uint16_t n;
3309 	uint16_t drop_bus_spin;
3310 	uint16_t drop_bus;
3311 	uint16_t drop_xfer;
3312 
3313 	for (n = 0; n != max; n++) {
3314 		/* Extra checks to avoid panic */
3315 		xfer = ppxfer[n];
3316 		if (xfer == NULL)
3317 			continue;	/* no USB transfer */
3318 		xroot = xfer->xroot;
3319 		if (xroot == NULL)
3320 			continue;	/* no USB root */
3321 		udev = xroot->udev;
3322 		if (udev == NULL)
3323 			continue;	/* no USB device */
3324 		bus = udev->bus;
3325 		if (bus == NULL)
3326 			continue;	/* no BUS structure */
3327 		if (bus->methods == NULL)
3328 			continue;	/* no BUS methods */
3329 		if (bus->methods->xfer_poll == NULL)
3330 			continue;	/* no poll method */
3331 
3332 		drop_bus_spin = 0;
3333 		drop_bus = 0;
3334 		drop_xfer = 0;
3335 
3336 		if (USB_IN_POLLING_MODE_FUNC() == 0) {
3337 			/* make sure that the BUS spin mutex is not locked */
3338 			while (mtx_owned(&bus->bus_spin_lock)) {
3339 				mtx_unlock_spin(&bus->bus_spin_lock);
3340 				drop_bus_spin++;
3341 			}
3342 
3343 			/* make sure that the BUS mutex is not locked */
3344 			while (mtx_owned(&bus->bus_mtx)) {
3345 				mtx_unlock(&bus->bus_mtx);
3346 				drop_bus++;
3347 			}
3348 
3349 			/* make sure that the transfer mutex is not locked */
3350 			while (mtx_owned(xroot->xfer_mtx)) {
3351 				mtx_unlock(xroot->xfer_mtx);
3352 				drop_xfer++;
3353 			}
3354 		}
3355 
3356 		/* Make sure cv_signal() and cv_broadcast() are not called */
3357 		USB_BUS_CONTROL_XFER_PROC(bus)->up_msleep = 0;
3358 		USB_BUS_EXPLORE_PROC(bus)->up_msleep = 0;
3359 		USB_BUS_GIANT_PROC(bus)->up_msleep = 0;
3360 		USB_BUS_NON_GIANT_ISOC_PROC(bus)->up_msleep = 0;
3361 		USB_BUS_NON_GIANT_BULK_PROC(bus)->up_msleep = 0;
3362 
3363 		/* poll USB hardware */
3364 		(bus->methods->xfer_poll) (bus);
3365 
3366 		USB_BUS_LOCK(xroot->bus);
3367 
3368 		/* check for clear stall */
3369 		if (udev->ctrl_xfer[1] != NULL) {
3370 
3371 			/* poll clear stall start */
3372 			pm = &udev->cs_msg[0].hdr;
3373 			(pm->pm_callback) (pm);
3374 			/* poll clear stall done thread */
3375 			pm = &udev->ctrl_xfer[1]->
3376 			    xroot->done_m[0].hdr;
3377 			(pm->pm_callback) (pm);
3378 		}
3379 
3380 		/* poll done thread */
3381 		pm = &xroot->done_m[0].hdr;
3382 		(pm->pm_callback) (pm);
3383 
3384 		USB_BUS_UNLOCK(xroot->bus);
3385 
3386 		/* restore transfer mutex */
3387 		while (drop_xfer--)
3388 			mtx_lock(xroot->xfer_mtx);
3389 
3390 		/* restore BUS mutex */
3391 		while (drop_bus--)
3392 			mtx_lock(&bus->bus_mtx);
3393 
3394 		/* restore BUS spin mutex */
3395 		while (drop_bus_spin--)
3396 			mtx_lock_spin(&bus->bus_spin_lock);
3397 	}
3398 }
3399 
3400 static void
3401 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3402     uint8_t type, enum usb_dev_speed speed)
3403 {
3404 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3405 		[USB_SPEED_LOW] = 8,
3406 		[USB_SPEED_FULL] = 64,
3407 		[USB_SPEED_HIGH] = 1024,
3408 		[USB_SPEED_VARIABLE] = 1024,
3409 		[USB_SPEED_SUPER] = 1024,
3410 	};
3411 
3412 	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3413 		[USB_SPEED_LOW] = 0,	/* invalid */
3414 		[USB_SPEED_FULL] = 1023,
3415 		[USB_SPEED_HIGH] = 1024,
3416 		[USB_SPEED_VARIABLE] = 3584,
3417 		[USB_SPEED_SUPER] = 1024,
3418 	};
3419 
3420 	static const uint16_t control_min[USB_SPEED_MAX] = {
3421 		[USB_SPEED_LOW] = 8,
3422 		[USB_SPEED_FULL] = 8,
3423 		[USB_SPEED_HIGH] = 64,
3424 		[USB_SPEED_VARIABLE] = 512,
3425 		[USB_SPEED_SUPER] = 512,
3426 	};
3427 
3428 	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3429 		[USB_SPEED_LOW] = 8,
3430 		[USB_SPEED_FULL] = 8,
3431 		[USB_SPEED_HIGH] = 512,
3432 		[USB_SPEED_VARIABLE] = 512,
3433 		[USB_SPEED_SUPER] = 1024,
3434 	};
3435 
3436 	uint16_t temp;
3437 
3438 	memset(ptr, 0, sizeof(*ptr));
3439 
3440 	switch (type) {
3441 	case UE_INTERRUPT:
3442 		ptr->range.max = intr_range_max[speed];
3443 		break;
3444 	case UE_ISOCHRONOUS:
3445 		ptr->range.max = isoc_range_max[speed];
3446 		break;
3447 	default:
3448 		if (type == UE_BULK)
3449 			temp = bulk_min[speed];
3450 		else /* UE_CONTROL */
3451 			temp = control_min[speed];
3452 
3453 		/* default is fixed */
3454 		ptr->fixed[0] = temp;
3455 		ptr->fixed[1] = temp;
3456 		ptr->fixed[2] = temp;
3457 		ptr->fixed[3] = temp;
3458 
3459 		if (speed == USB_SPEED_FULL) {
3460 			/* multiple sizes */
3461 			ptr->fixed[1] = 16;
3462 			ptr->fixed[2] = 32;
3463 			ptr->fixed[3] = 64;
3464 		}
3465 		if ((speed == USB_SPEED_VARIABLE) &&
3466 		    (type == UE_BULK)) {
3467 			/* multiple sizes */
3468 			ptr->fixed[2] = 1024;
3469 			ptr->fixed[3] = 1536;
3470 		}
3471 		break;
3472 	}
3473 }
3474 
3475 void	*
3476 usbd_xfer_softc(struct usb_xfer *xfer)
3477 {
3478 	return (xfer->priv_sc);
3479 }
3480 
3481 void *
3482 usbd_xfer_get_priv(struct usb_xfer *xfer)
3483 {
3484 	return (xfer->priv_fifo);
3485 }
3486 
3487 void
3488 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3489 {
3490 	xfer->priv_fifo = ptr;
3491 }
3492 
3493 uint8_t
3494 usbd_xfer_state(struct usb_xfer *xfer)
3495 {
3496 	return (xfer->usb_state);
3497 }
3498 
3499 void
3500 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3501 {
3502 	switch (flag) {
3503 		case USB_FORCE_SHORT_XFER:
3504 			xfer->flags.force_short_xfer = 1;
3505 			break;
3506 		case USB_SHORT_XFER_OK:
3507 			xfer->flags.short_xfer_ok = 1;
3508 			break;
3509 		case USB_MULTI_SHORT_OK:
3510 			xfer->flags.short_frames_ok = 1;
3511 			break;
3512 		case USB_MANUAL_STATUS:
3513 			xfer->flags.manual_status = 1;
3514 			break;
3515 	}
3516 }
3517 
3518 void
3519 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3520 {
3521 	switch (flag) {
3522 		case USB_FORCE_SHORT_XFER:
3523 			xfer->flags.force_short_xfer = 0;
3524 			break;
3525 		case USB_SHORT_XFER_OK:
3526 			xfer->flags.short_xfer_ok = 0;
3527 			break;
3528 		case USB_MULTI_SHORT_OK:
3529 			xfer->flags.short_frames_ok = 0;
3530 			break;
3531 		case USB_MANUAL_STATUS:
3532 			xfer->flags.manual_status = 0;
3533 			break;
3534 	}
3535 }
3536 
3537 /*
3538  * The following function returns the time, in milliseconds, at which
3539  * the isochronous transfer was completed by the hardware. The returned
3540  * value wraps around 65536 milliseconds.
3541  */
3542 uint16_t
3543 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3544 {
3545 	return (xfer->isoc_time_complete);
3546 }
3547 
3548 /*
3549  * The following function returns non-zero if the max packet size
3550  * field was clamped to a valid value. Else it returns zero.
3551  */
3552 uint8_t
3553 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3554 {
3555 	return (xfer->flags_int.maxp_was_clamped);
3556 }
3557