xref: /freebsd/sys/dev/usb/usb_transfer.c (revision ec0e626bafb335b30c499d06066997f54b10c092)
1 /* $FreeBSD$ */
2 /*-
3  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #ifdef USB_GLOBAL_INCLUDE_FILE
28 #include USB_GLOBAL_INCLUDE_FILE
29 #else
30 #include <sys/stdint.h>
31 #include <sys/stddef.h>
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
43 #include <sys/sx.h>
44 #include <sys/unistd.h>
45 #include <sys/callout.h>
46 #include <sys/malloc.h>
47 #include <sys/priv.h>
48 #include <sys/proc.h>
49 
50 #include <dev/usb/usb.h>
51 #include <dev/usb/usbdi.h>
52 #include <dev/usb/usbdi_util.h>
53 
54 #define	USB_DEBUG_VAR usb_debug
55 
56 #include <dev/usb/usb_core.h>
57 #include <dev/usb/usb_busdma.h>
58 #include <dev/usb/usb_process.h>
59 #include <dev/usb/usb_transfer.h>
60 #include <dev/usb/usb_device.h>
61 #include <dev/usb/usb_debug.h>
62 #include <dev/usb/usb_util.h>
63 
64 #include <dev/usb/usb_controller.h>
65 #include <dev/usb/usb_bus.h>
66 #include <dev/usb/usb_pf.h>
67 #endif			/* USB_GLOBAL_INCLUDE_FILE */
68 
69 struct usb_std_packet_size {
70 	struct {
71 		uint16_t min;		/* inclusive */
72 		uint16_t max;		/* inclusive */
73 	}	range;
74 
75 	uint16_t fixed[4];
76 };
77 
78 static usb_callback_t usb_request_callback;
79 
80 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
81 
82 	/* This transfer is used for generic control endpoint transfers */
83 
84 	[0] = {
85 		.type = UE_CONTROL,
86 		.endpoint = 0x00,	/* Control endpoint */
87 		.direction = UE_DIR_ANY,
88 		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
89 		.flags = {.proxy_buffer = 1,},
90 		.callback = &usb_request_callback,
91 		.usb_mode = USB_MODE_DUAL,	/* both modes */
92 	},
93 
94 	/* This transfer is used for generic clear stall only */
95 
96 	[1] = {
97 		.type = UE_CONTROL,
98 		.endpoint = 0x00,	/* Control pipe */
99 		.direction = UE_DIR_ANY,
100 		.bufsize = sizeof(struct usb_device_request),
101 		.callback = &usb_do_clear_stall_callback,
102 		.timeout = 1000,	/* 1 second */
103 		.interval = 50,	/* 50ms */
104 		.usb_mode = USB_MODE_HOST,
105 	},
106 };
107 
108 /* function prototypes */
109 
110 static void	usbd_update_max_frame_size(struct usb_xfer *);
111 static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
112 static void	usbd_control_transfer_init(struct usb_xfer *);
113 static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
114 static void	usb_callback_proc(struct usb_proc_msg *);
115 static void	usbd_callback_ss_done_defer(struct usb_xfer *);
116 static void	usbd_callback_wrapper(struct usb_xfer_queue *);
117 static void	usbd_transfer_start_cb(void *);
118 static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
119 static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
120 		    uint8_t type, enum usb_dev_speed speed);
121 
122 /*------------------------------------------------------------------------*
123  *	usb_request_callback
124  *------------------------------------------------------------------------*/
125 static void
126 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
127 {
128 	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
129 		usb_handle_request_callback(xfer, error);
130 	else
131 		usbd_do_request_callback(xfer, error);
132 }
133 
134 /*------------------------------------------------------------------------*
135  *	usbd_update_max_frame_size
136  *
137  * This function updates the maximum frame size, taking into account
138  * that high speed USB can transfer multiple consecutive packets per frame.
139  *------------------------------------------------------------------------*/
140 static void
141 usbd_update_max_frame_size(struct usb_xfer *xfer)
142 {
143 	/* compute maximum frame size */
144 	/* this computation should not overflow 16-bit */
145 	/* max = 15 * 1024 */
146 
147 	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
148 }
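
/*
 * Worked example (illustrative): a high speed isochronous or interrupt
 * endpoint reporting wMaxPacketSize = 0x1400 encodes a packet size of
 * 1024 bytes (bits 10..0) plus two additional transaction opportunities
 * per microframe (bits 12..11), so max_packet_size = 1024,
 * max_packet_count = 3 and max_frame_size = 3 * 1024 = 3072 bytes.
 */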
149 
150 /*------------------------------------------------------------------------*
151  *	usbd_get_dma_delay
152  *
153  * The following function is called when we need to
154  * synchronize with DMA hardware.
155  *
156  * Returns:
157  *    0: no DMA delay required
158  * Else: milliseconds of DMA delay
159  *------------------------------------------------------------------------*/
160 usb_timeout_t
161 usbd_get_dma_delay(struct usb_device *udev)
162 {
163 	const struct usb_bus_methods *mtod;
164 	uint32_t temp;
165 
166 	mtod = udev->bus->methods;
167 	temp = 0;
168 
169 	if (mtod->get_dma_delay) {
170 		(mtod->get_dma_delay) (udev, &temp);
171 		/*
172 		 * Round up and convert to milliseconds. Note that we use
173 		 * 1024 milliseconds per second, to save a division.
174 		 */
175 		temp += 0x3FF;
176 		temp /= 0x400;
177 	}
178 	return (temp);
179 }
180 
181 /*------------------------------------------------------------------------*
182  *	usbd_transfer_setup_sub_malloc
183  *
184  * This function will allocate one or more DMA'able memory chunks
185  * according to the "size", "align" and "count" arguments. Afterwards
186  * "ppc" will point to a linear array of USB page caches.
187  *
188  * If the "align" argument is equal to "1" a non-contiguous allocation
189  * can happen. Else if the "align" argument is greater than "1", the
190  * allocation will always be contiguous in memory.
191  *
192  * Returns:
193  *    0: Success
194  * Else: Failure
195  *------------------------------------------------------------------------*/
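/*
 * Illustrative sketch (hypothetical controller driver code, not part of
 * this file): a host controller driver typically calls this from its
 * "xfer_setup" method to reserve and allocate descriptor memory, e.g.:
 *
 *	usbd_transfer_setup_sub_malloc(parm, &pc,
 *	    sizeof(struct my_td), MY_TD_ALIGN, n_td);
 *
 * The first pass, with "parm->buf" equal to NULL, only accounts for the
 * memory needed; the second pass performs the actual allocations.
 */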
196 #if USB_HAVE_BUSDMA
197 uint8_t
198 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
199     struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
200     usb_size_t count)
201 {
202 	struct usb_page_cache *pc;
203 	struct usb_page *pg;
204 	void *buf;
205 	usb_size_t n_dma_pc;
206 	usb_size_t n_dma_pg;
207 	usb_size_t n_obj;
208 	usb_size_t x;
209 	usb_size_t y;
210 	usb_size_t r;
211 	usb_size_t z;
212 
213 	USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
214 	    align));
215 	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
216 
217 	if (count == 0) {
218 		return (0);		/* nothing to allocate */
219 	}
220 	/*
221 	 * Make sure that the size is aligned properly.
222 	 */
223 	size = -((-size) & (-align));
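	/*
	 * Note: assuming "align" is a power of two, the expression above
	 * is the usual two's-complement round-up and is equivalent to
	 * roundup2(size, align).
	 */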
224 
225 	/*
226 	 * Try multi-allocation chunks to reduce the number of DMA
227 	 * allocations, because DMA allocations are slow.
228 	 */
229 	if (align == 1) {
230 		/* special case - non-cached multi page DMA memory */
231 		n_dma_pc = count;
232 		n_dma_pg = (2 + (size / USB_PAGE_SIZE));
233 		n_obj = 1;
234 	} else if (size >= USB_PAGE_SIZE) {
235 		n_dma_pc = count;
236 		n_dma_pg = 1;
237 		n_obj = 1;
238 	} else {
239 		/* compute number of objects per page */
240 #ifdef USB_DMA_SINGLE_ALLOC
241 		n_obj = 1;
242 #else
243 		n_obj = (USB_PAGE_SIZE / size);
244 #endif
245 		/*
246 		 * Compute number of DMA chunks, rounded up
247 		 * to nearest one:
248 		 */
249 		n_dma_pc = ((count + n_obj - 1) / n_obj);
250 		n_dma_pg = 1;
251 	}
252 
253 	/*
254 	 * DMA memory is allocated once, but mapped twice. That's why
255 	 * there is one list for auto-free and another list for
256 	 * non-auto-free which only holds the mapping and not the
257 	 * allocation.
258 	 */
259 	if (parm->buf == NULL) {
260 		/* reserve memory (auto-free) */
261 		parm->dma_page_ptr += n_dma_pc * n_dma_pg;
262 		parm->dma_page_cache_ptr += n_dma_pc;
263 
264 		/* reserve memory (no-auto-free) */
265 		parm->dma_page_ptr += count * n_dma_pg;
266 		parm->xfer_page_cache_ptr += count;
267 		return (0);
268 	}
269 	for (x = 0; x != n_dma_pc; x++) {
270 		/* need to initialize the page cache */
271 		parm->dma_page_cache_ptr[x].tag_parent =
272 		    &parm->curr_xfer->xroot->dma_parent_tag;
273 	}
274 	for (x = 0; x != count; x++) {
275 		/* need to initialize the page cache */
276 		parm->xfer_page_cache_ptr[x].tag_parent =
277 		    &parm->curr_xfer->xroot->dma_parent_tag;
278 	}
279 
280 	if (ppc != NULL) {
281 		if (n_obj != 1)
282 			*ppc = parm->xfer_page_cache_ptr;
283 		else
284 			*ppc = parm->dma_page_cache_ptr;
285 	}
286 	r = count;			/* set remainder count */
287 	z = n_obj * size;		/* set allocation size */
288 	pc = parm->xfer_page_cache_ptr;
289 	pg = parm->dma_page_ptr;
290 
291 	if (n_obj == 1) {
292 	    /*
293 	     * Avoid mapping memory twice if only a single object
294 	     * should be allocated per page cache:
295 	     */
296 	    for (x = 0; x != n_dma_pc; x++) {
297 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
298 		    pg, z, align)) {
299 			return (1);	/* failure */
300 		}
301 		/* Make room for one DMA page cache and "n_dma_pg" pages */
302 		parm->dma_page_cache_ptr++;
303 		pg += n_dma_pg;
304 	    }
305 	} else {
306 	    for (x = 0; x != n_dma_pc; x++) {
307 
308 		if (r < n_obj) {
309 			/* compute last remainder */
310 			z = r * size;
311 			n_obj = r;
312 		}
313 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
314 		    pg, z, align)) {
315 			return (1);	/* failure */
316 		}
317 		/* Set beginning of current buffer */
318 		buf = parm->dma_page_cache_ptr->buffer;
319 		/* Make room for one DMA page cache and "n_dma_pg" pages */
320 		parm->dma_page_cache_ptr++;
321 		pg += n_dma_pg;
322 
323 		for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
324 
325 			/* Load sub-chunk into DMA */
326 			if (usb_pc_dmamap_create(pc, size)) {
327 				return (1);	/* failure */
328 			}
329 			pc->buffer = USB_ADD_BYTES(buf, y * size);
330 			pc->page_start = pg;
331 
332 			mtx_lock(pc->tag_parent->mtx);
333 			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
334 				mtx_unlock(pc->tag_parent->mtx);
335 				return (1);	/* failure */
336 			}
337 			mtx_unlock(pc->tag_parent->mtx);
338 		}
339 	    }
340 	}
341 
342 	parm->xfer_page_cache_ptr = pc;
343 	parm->dma_page_ptr = pg;
344 	return (0);
345 }
346 #endif
347 
348 /*------------------------------------------------------------------------*
349  *	usbd_transfer_setup_sub - transfer setup subroutine
350  *
351  * This function must be called from the "xfer_setup" callback of the
352  * USB Host or Device controller driver when setting up an USB
353  * transfer. This function will setup correct packet sizes, buffer
354  * sizes, flags and more, that are stored in the "usb_xfer"
355  * structure.
356  *------------------------------------------------------------------------*/
357 void
358 usbd_transfer_setup_sub(struct usb_setup_params *parm)
359 {
360 	enum {
361 		REQ_SIZE = 8,
362 		MIN_PKT = 8,
363 	};
364 	struct usb_xfer *xfer = parm->curr_xfer;
365 	const struct usb_config *setup = parm->curr_setup;
366 	struct usb_endpoint_ss_comp_descriptor *ecomp;
367 	struct usb_endpoint_descriptor *edesc;
368 	struct usb_std_packet_size std_size;
369 	usb_frcount_t n_frlengths;
370 	usb_frcount_t n_frbuffers;
371 	usb_frcount_t x;
372 	uint16_t maxp_old;
373 	uint8_t type;
374 	uint8_t zmps;
375 
376 	/*
377 	 * Sanity check. The following parameters must be initialized before
378 	 * calling this function.
379 	 */
380 	if ((parm->hc_max_packet_size == 0) ||
381 	    (parm->hc_max_packet_count == 0) ||
382 	    (parm->hc_max_frame_size == 0)) {
383 		parm->err = USB_ERR_INVAL;
384 		goto done;
385 	}
386 	edesc = xfer->endpoint->edesc;
387 	ecomp = xfer->endpoint->ecomp;
388 
389 	type = (edesc->bmAttributes & UE_XFERTYPE);
390 
391 	xfer->flags = setup->flags;
392 	xfer->nframes = setup->frames;
393 	xfer->timeout = setup->timeout;
394 	xfer->callback = setup->callback;
395 	xfer->interval = setup->interval;
396 	xfer->endpointno = edesc->bEndpointAddress;
397 	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
398 	xfer->max_packet_count = 1;
399 	/* make a shadow copy: */
400 	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
401 
402 	parm->bufsize = setup->bufsize;
403 
404 	switch (parm->speed) {
405 	case USB_SPEED_HIGH:
406 		switch (type) {
407 		case UE_ISOCHRONOUS:
408 		case UE_INTERRUPT:
409 			xfer->max_packet_count +=
410 			    (xfer->max_packet_size >> 11) & 3;
411 
412 			/* check for invalid max packet count */
413 			if (xfer->max_packet_count > 3)
414 				xfer->max_packet_count = 3;
415 			break;
416 		default:
417 			break;
418 		}
419 		xfer->max_packet_size &= 0x7FF;
420 		break;
421 	case USB_SPEED_SUPER:
422 		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
423 
424 		if (ecomp != NULL)
425 			xfer->max_packet_count += ecomp->bMaxBurst;
426 
427 		if ((xfer->max_packet_count == 0) ||
428 		    (xfer->max_packet_count > 16))
429 			xfer->max_packet_count = 16;
430 
431 		switch (type) {
432 		case UE_CONTROL:
433 			xfer->max_packet_count = 1;
434 			break;
435 		case UE_ISOCHRONOUS:
436 			if (ecomp != NULL) {
437 				uint8_t mult;
438 
439 				mult = UE_GET_SS_ISO_MULT(
440 				    ecomp->bmAttributes) + 1;
441 				if (mult > 3)
442 					mult = 3;
443 
444 				xfer->max_packet_count *= mult;
445 			}
446 			break;
447 		default:
448 			break;
449 		}
450 		xfer->max_packet_size &= 0x7FF;
451 		break;
452 	default:
453 		break;
454 	}
455 	/* range check "max_packet_count" */
456 
457 	if (xfer->max_packet_count > parm->hc_max_packet_count) {
458 		xfer->max_packet_count = parm->hc_max_packet_count;
459 	}
460 
461 	/* store max packet size value before filtering */
462 
463 	maxp_old = xfer->max_packet_size;
464 
465 	/* filter "wMaxPacketSize" according to HC capabilities */
466 
467 	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
468 	    (xfer->max_packet_size == 0)) {
469 		xfer->max_packet_size = parm->hc_max_packet_size;
470 	}
471 	/* filter "wMaxPacketSize" according to standard sizes */
472 
473 	usbd_get_std_packet_size(&std_size, type, parm->speed);
474 
475 	if (std_size.range.min || std_size.range.max) {
476 
477 		if (xfer->max_packet_size < std_size.range.min) {
478 			xfer->max_packet_size = std_size.range.min;
479 		}
480 		if (xfer->max_packet_size > std_size.range.max) {
481 			xfer->max_packet_size = std_size.range.max;
482 		}
483 	} else {
484 
485 		if (xfer->max_packet_size >= std_size.fixed[3]) {
486 			xfer->max_packet_size = std_size.fixed[3];
487 		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
488 			xfer->max_packet_size = std_size.fixed[2];
489 		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
490 			xfer->max_packet_size = std_size.fixed[1];
491 		} else {
492 			/* only one possibility left */
493 			xfer->max_packet_size = std_size.fixed[0];
494 		}
495 	}
496 
497 	/*
498 	 * Check if the max packet size was outside its allowed range
499 	 * and clamped to a valid value:
500 	 */
501 	if (maxp_old != xfer->max_packet_size)
502 		xfer->flags_int.maxp_was_clamped = 1;
503 
504 	/* compute "max_frame_size" */
505 
506 	usbd_update_max_frame_size(xfer);
507 
508 	/* check interrupt interval and transfer pre-delay */
509 
510 	if (type == UE_ISOCHRONOUS) {
511 
512 		uint16_t frame_limit;
513 
514 		xfer->interval = 0;	/* not used, must be zero */
515 		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
516 
517 		if (xfer->timeout == 0) {
518 			/*
519 			 * set a default timeout in
520 			 * case something goes wrong!
521 			 */
522 			xfer->timeout = 1000 / 4;
523 		}
524 		switch (parm->speed) {
525 		case USB_SPEED_LOW:
526 		case USB_SPEED_FULL:
527 			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
528 			xfer->fps_shift = 0;
529 			break;
530 		default:
531 			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
532 			xfer->fps_shift = edesc->bInterval;
533 			if (xfer->fps_shift > 0)
534 				xfer->fps_shift--;
535 			if (xfer->fps_shift > 3)
536 				xfer->fps_shift = 3;
537 			if (xfer->flags.pre_scale_frames != 0)
538 				xfer->nframes <<= (3 - xfer->fps_shift);
539 			break;
540 		}
541 
542 		if (xfer->nframes > frame_limit) {
543 			/*
544 			 * this is not going to work
545 			 * cross hardware
546 			 */
547 			parm->err = USB_ERR_INVAL;
548 			goto done;
549 		}
550 		if (xfer->nframes == 0) {
551 			/*
552 			 * this is not a valid value
553 			 */
554 			parm->err = USB_ERR_ZERO_NFRAMES;
555 			goto done;
556 		}
557 	} else {
558 
559 		/*
560 		 * If an interval value is specified, use it; else take the
561 		 * value from the endpoint descriptor.
562 		 */
563 		if (type == UE_INTERRUPT) {
564 
565 			uint32_t temp;
566 
567 			if (xfer->interval == 0) {
568 
569 				xfer->interval = edesc->bInterval;
570 
571 				switch (parm->speed) {
572 				case USB_SPEED_LOW:
573 				case USB_SPEED_FULL:
574 					break;
575 				default:
576 					/* 125us -> 1ms */
577 					if (xfer->interval < 4)
578 						xfer->interval = 1;
579 					else if (xfer->interval > 16)
580 						xfer->interval = (1 << (16 - 4));
581 					else
582 						xfer->interval =
583 						    (1 << (xfer->interval - 4));
584 					break;
585 				}
586 			}
587 
588 			if (xfer->interval == 0) {
589 				/*
590 				 * One millisecond is the smallest
591 				 * interval we support:
592 				 */
593 				xfer->interval = 1;
594 			}
595 
596 			xfer->fps_shift = 0;
597 			temp = 1;
598 
599 			while ((temp != 0) && (temp < xfer->interval)) {
600 				xfer->fps_shift++;
601 				temp *= 2;
602 			}
603 
604 			switch (parm->speed) {
605 			case USB_SPEED_LOW:
606 			case USB_SPEED_FULL:
607 				break;
608 			default:
609 				xfer->fps_shift += 3;
610 				break;
611 			}
612 		}
613 	}
614 
615 	/*
616 	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
617 	 * to be equal to zero when setting up USB transfers, because
618 	 * this would lead to a lot of extra code in the USB kernel.
619 	 */
620 
621 	if ((xfer->max_frame_size == 0) ||
622 	    (xfer->max_packet_size == 0)) {
623 
624 		zmps = 1;
625 
626 		if ((parm->bufsize <= MIN_PKT) &&
627 		    (type != UE_CONTROL) &&
628 		    (type != UE_BULK)) {
629 
630 			/* workaround */
631 			xfer->max_packet_size = MIN_PKT;
632 			xfer->max_packet_count = 1;
633 			parm->bufsize = 0;	/* automatic setup length */
634 			usbd_update_max_frame_size(xfer);
635 
636 		} else {
637 			parm->err = USB_ERR_ZERO_MAXP;
638 			goto done;
639 		}
640 
641 	} else {
642 		zmps = 0;
643 	}
644 
645 	/*
646 	 * check if we should setup a default
647 	 * length:
648 	 */
649 
650 	if (parm->bufsize == 0) {
651 
652 		parm->bufsize = xfer->max_frame_size;
653 
654 		if (type == UE_ISOCHRONOUS) {
655 			parm->bufsize *= xfer->nframes;
656 		}
657 	}
658 	/*
659 	 * check if we are about to setup a proxy
660 	 * type of buffer:
661 	 */
662 
663 	if (xfer->flags.proxy_buffer) {
664 
665 		/* round bufsize up */
666 
667 		parm->bufsize += (xfer->max_frame_size - 1);
668 
669 		if (parm->bufsize < xfer->max_frame_size) {
670 			/* length wrapped around */
671 			parm->err = USB_ERR_INVAL;
672 			goto done;
673 		}
674 		/* subtract remainder */
675 
676 		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
677 
678 		/* add length of USB device request structure, if any */
679 
680 		if (type == UE_CONTROL) {
681 			parm->bufsize += REQ_SIZE;	/* SETUP message */
682 		}
683 	}
684 	xfer->max_data_length = parm->bufsize;
685 
686 	/* Setup "n_frlengths" and "n_frbuffers" */
687 
688 	if (type == UE_ISOCHRONOUS) {
689 		n_frlengths = xfer->nframes;
690 		n_frbuffers = 1;
691 	} else {
692 
693 		if (type == UE_CONTROL) {
694 			xfer->flags_int.control_xfr = 1;
695 			if (xfer->nframes == 0) {
696 				if (parm->bufsize <= REQ_SIZE) {
697 					/*
698 					 * there will never be any data
699 					 * stage
700 					 */
701 					xfer->nframes = 1;
702 				} else {
703 					xfer->nframes = 2;
704 				}
705 			}
706 		} else {
707 			if (xfer->nframes == 0) {
708 				xfer->nframes = 1;
709 			}
710 		}
711 
712 		n_frlengths = xfer->nframes;
713 		n_frbuffers = xfer->nframes;
714 	}
715 
716 	/*
717 	 * check if we have room for the
718 	 * USB device request structure:
719 	 */
720 
721 	if (type == UE_CONTROL) {
722 
723 		if (xfer->max_data_length < REQ_SIZE) {
724 			/* length wrapped around or too small bufsize */
725 			parm->err = USB_ERR_INVAL;
726 			goto done;
727 		}
728 		xfer->max_data_length -= REQ_SIZE;
729 	}
730 	/*
731 	 * Setup "frlengths" and shadow "frlengths" for keeping the
732 	 * initial frame lengths when a USB transfer is complete. This
733 	 * information is useful when computing isochronous offsets.
734 	 */
735 	xfer->frlengths = parm->xfer_length_ptr;
736 	parm->xfer_length_ptr += 2 * n_frlengths;
737 
738 	/* setup "frbuffers" */
739 	xfer->frbuffers = parm->xfer_page_cache_ptr;
740 	parm->xfer_page_cache_ptr += n_frbuffers;
741 
742 	/* initialize max frame count */
743 	xfer->max_frame_count = xfer->nframes;
744 
745 	/*
746 	 * check if we need to setup
747 	 * a local buffer:
748 	 */
749 
750 	if (!xfer->flags.ext_buffer) {
751 #if USB_HAVE_BUSDMA
752 		struct usb_page_search page_info;
753 		struct usb_page_cache *pc;
754 
755 		if (usbd_transfer_setup_sub_malloc(parm,
756 		    &pc, parm->bufsize, 1, 1)) {
757 			parm->err = USB_ERR_NOMEM;
758 		} else if (parm->buf != NULL) {
759 
760 			usbd_get_page(pc, 0, &page_info);
761 
762 			xfer->local_buffer = page_info.buffer;
763 
764 			usbd_xfer_set_frame_offset(xfer, 0, 0);
765 
766 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
767 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
768 			}
769 		}
770 #else
771 		/* align data */
772 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
773 
774 		if (parm->buf != NULL) {
775 			xfer->local_buffer =
776 			    USB_ADD_BYTES(parm->buf, parm->size[0]);
777 
778 			usbd_xfer_set_frame_offset(xfer, 0, 0);
779 
780 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
781 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
782 			}
783 		}
784 		parm->size[0] += parm->bufsize;
785 
786 		/* align data again */
787 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
788 #endif
789 	}
790 	/*
791 	 * Compute maximum buffer size
792 	 */
793 
794 	if (parm->bufsize_max < parm->bufsize) {
795 		parm->bufsize_max = parm->bufsize;
796 	}
797 #if USB_HAVE_BUSDMA
798 	if (xfer->flags_int.bdma_enable) {
799 		/*
800 		 * Setup "dma_page_ptr".
801 		 *
802 		 * Proof for formula below:
803 		 *
804 		 * Assume there are three USB frames having length "a", "b" and
805 		 * "c". These USB frames will at maximum need "z"
806 		 * "usb_page" structures. "z" is given by:
807 		 *
808 		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
809 		 * ((c / USB_PAGE_SIZE) + 2);
810 		 *
811 		 * Constraining "a", "b" and "c" like this:
812 		 *
813 		 * (a + b + c) <= parm->bufsize
814 		 *
815 		 * We know that:
816 		 *
817 		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
818 		 *
819 		 * Here is the general formula:
820 		 */
821 		xfer->dma_page_ptr = parm->dma_page_ptr;
822 		parm->dma_page_ptr += (2 * n_frbuffers);
823 		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
824 	}
825 #endif
826 	if (zmps) {
827 		/* correct maximum data length */
828 		xfer->max_data_length = 0;
829 	}
830 	/* subtract USB frame remainder from "hc_max_frame_size" */
831 
832 	xfer->max_hc_frame_size =
833 	    (parm->hc_max_frame_size -
834 	    (parm->hc_max_frame_size % xfer->max_frame_size));
835 
836 	if (xfer->max_hc_frame_size == 0) {
837 		parm->err = USB_ERR_INVAL;
838 		goto done;
839 	}
840 
841 	/* initialize frame buffers */
842 
843 	if (parm->buf) {
844 		for (x = 0; x != n_frbuffers; x++) {
845 			xfer->frbuffers[x].tag_parent =
846 			    &xfer->xroot->dma_parent_tag;
847 #if USB_HAVE_BUSDMA
848 			if (xfer->flags_int.bdma_enable &&
849 			    (parm->bufsize_max > 0)) {
850 
851 				if (usb_pc_dmamap_create(
852 				    xfer->frbuffers + x,
853 				    parm->bufsize_max)) {
854 					parm->err = USB_ERR_NOMEM;
855 					goto done;
856 				}
857 			}
858 #endif
859 		}
860 	}
861 done:
862 	if (parm->err) {
863 		/*
864 		 * Set some dummy values so that we avoid division by zero:
865 		 */
866 		xfer->max_hc_frame_size = 1;
867 		xfer->max_frame_size = 1;
868 		xfer->max_packet_size = 1;
869 		xfer->max_data_length = 0;
870 		xfer->nframes = 0;
871 		xfer->max_frame_count = 0;
872 	}
873 }
874 
875 /*------------------------------------------------------------------------*
876  *	usbd_transfer_setup - setup an array of USB transfers
877  *
878  * NOTE: You must always call "usbd_transfer_unsetup" after calling
879  * "usbd_transfer_setup" if success was returned.
880  *
881  * The idea is that the USB device driver should pre-allocate all its
882  * transfers by one call to this function.
883  *
884  * Return values:
885  *    0: Success
886  * Else: Failure
887  *------------------------------------------------------------------------*/
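/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a device driver typically pre-allocates its transfers once at
 * attach time, for example:
 *
 *	static const struct usb_config my_config[MY_N_TRANSFER] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = MY_BUFSIZE,
 *			.flags = {.short_xfer_ok = 1,},
 *			.callback = &my_bulk_read_callback,
 *		},
 *	};
 *
 *	error = usbd_transfer_setup(uaa->device, &iface_index,
 *	    sc->sc_xfer, my_config, MY_N_TRANSFER, sc, &sc->sc_mtx);
 *
 * On success the transfers must later be released again with
 * usbd_transfer_unsetup(sc->sc_xfer, MY_N_TRANSFER).
 */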
888 usb_error_t
889 usbd_transfer_setup(struct usb_device *udev,
890     const uint8_t *ifaces, struct usb_xfer **ppxfer,
891     const struct usb_config *setup_start, uint16_t n_setup,
892     void *priv_sc, struct mtx *xfer_mtx)
893 {
894 	const struct usb_config *setup_end = setup_start + n_setup;
895 	const struct usb_config *setup;
896 	struct usb_setup_params *parm;
897 	struct usb_endpoint *ep;
898 	struct usb_xfer_root *info;
899 	struct usb_xfer *xfer;
900 	void *buf = NULL;
901 	usb_error_t error = 0;
902 	uint16_t n;
903 	uint16_t refcount;
904 	uint8_t do_unlock;
905 
906 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
907 	    "usbd_transfer_setup can sleep!");
908 
909 	/* do some checking first */
910 
911 	if (n_setup == 0) {
912 		DPRINTFN(6, "setup array has zero length!\n");
913 		return (USB_ERR_INVAL);
914 	}
915 	if (ifaces == 0) {
916 		DPRINTFN(6, "ifaces array is NULL!\n");
917 		return (USB_ERR_INVAL);
918 	}
919 	if (xfer_mtx == NULL) {
920 		DPRINTFN(6, "using global lock\n");
921 		xfer_mtx = &Giant;
922 	}
923 
924 	/* more sanity checks */
925 
926 	for (setup = setup_start, n = 0;
927 	    setup != setup_end; setup++, n++) {
928 		if (setup->bufsize == (usb_frlength_t)-1) {
929 			error = USB_ERR_BAD_BUFSIZE;
930 			DPRINTF("invalid bufsize\n");
931 		}
932 		if (setup->callback == NULL) {
933 			error = USB_ERR_NO_CALLBACK;
934 			DPRINTF("no callback\n");
935 		}
936 		ppxfer[n] = NULL;
937 	}
938 
939 	if (error)
940 		return (error);
941 
942 	/* Protect scratch area */
943 	do_unlock = usbd_enum_lock(udev);
944 
945 	refcount = 0;
946 	info = NULL;
947 
948 	parm = &udev->scratch.xfer_setup[0].parm;
949 	memset(parm, 0, sizeof(*parm));
950 
951 	parm->udev = udev;
952 	parm->speed = usbd_get_speed(udev);
953 	parm->hc_max_packet_count = 1;
954 
955 	if (parm->speed >= USB_SPEED_MAX) {
956 		parm->err = USB_ERR_INVAL;
957 		goto done;
958 	}
959 	/* setup all transfers */
960 
961 	while (1) {
962 
963 		if (buf) {
964 			/*
965 			 * Initialize the "usb_xfer_root" structure,
966 			 * which is common for all our USB transfers.
967 			 */
968 			info = USB_ADD_BYTES(buf, 0);
969 
970 			info->memory_base = buf;
971 			info->memory_size = parm->size[0];
972 
973 #if USB_HAVE_BUSDMA
974 			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
975 			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
976 #endif
977 			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
978 			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
979 
980 			cv_init(&info->cv_drain, "WDRAIN");
981 
982 			info->xfer_mtx = xfer_mtx;
983 #if USB_HAVE_BUSDMA
984 			usb_dma_tag_setup(&info->dma_parent_tag,
985 			    parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
986 			    xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
987 			    parm->dma_tag_max);
988 #endif
989 
990 			info->bus = udev->bus;
991 			info->udev = udev;
992 
993 			TAILQ_INIT(&info->done_q.head);
994 			info->done_q.command = &usbd_callback_wrapper;
995 #if USB_HAVE_BUSDMA
996 			TAILQ_INIT(&info->dma_q.head);
997 			info->dma_q.command = &usb_bdma_work_loop;
998 #endif
999 			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1000 			info->done_m[0].xroot = info;
1001 			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1002 			info->done_m[1].xroot = info;
1003 
1004 			/*
1005 			 * In device side mode control endpoint
1006 			 * requests need to run from a separate
1007 			 * context, else there is a chance of
1008 			 * deadlock!
1009 			 */
1010 			if (setup_start == usb_control_ep_cfg)
1011 				info->done_p =
1012 				    USB_BUS_CONTROL_XFER_PROC(udev->bus);
1013 			else if (xfer_mtx == &Giant)
1014 				info->done_p =
1015 				    USB_BUS_GIANT_PROC(udev->bus);
1016 			else
1017 				info->done_p =
1018 				    USB_BUS_NON_GIANT_PROC(udev->bus);
1019 		}
1020 		/* reset sizes */
1021 
1022 		parm->size[0] = 0;
1023 		parm->buf = buf;
1024 		parm->size[0] += sizeof(info[0]);
1025 
1026 		for (setup = setup_start, n = 0;
1027 		    setup != setup_end; setup++, n++) {
1028 
1029 			/* skip USB transfers without callbacks: */
1030 			if (setup->callback == NULL) {
1031 				continue;
1032 			}
1033 			/* see if there is a matching endpoint */
1034 			ep = usbd_get_endpoint(udev,
1035 			    ifaces[setup->if_index], setup);
1036 
1037 			/*
1038 			 * Check that the USB PIPE is valid and that
1039 			 * the endpoint mode is proper.
1040 			 *
1041 			 * Make sure we don't allocate a streams
1042 			 * transfer when such a combination is not
1043 			 * valid.
1044 			 */
1045 			if ((ep == NULL) || (ep->methods == NULL) ||
1046 			    ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1047 			    (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1048 			    (setup->stream_id != 0 &&
1049 			    (setup->stream_id >= USB_MAX_EP_STREAMS ||
1050 			    (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1051 				if (setup->flags.no_pipe_ok)
1052 					continue;
1053 				if ((setup->usb_mode != USB_MODE_DUAL) &&
1054 				    (setup->usb_mode != udev->flags.usb_mode))
1055 					continue;
1056 				parm->err = USB_ERR_NO_PIPE;
1057 				goto done;
1058 			}
1059 
1060 			/* align data properly */
1061 			parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1062 
1063 			/* store current setup pointer */
1064 			parm->curr_setup = setup;
1065 
1066 			if (buf) {
1067 				/*
1068 				 * Common initialization of the
1069 				 * "usb_xfer" structure.
1070 				 */
1071 				xfer = USB_ADD_BYTES(buf, parm->size[0]);
1072 				xfer->address = udev->address;
1073 				xfer->priv_sc = priv_sc;
1074 				xfer->xroot = info;
1075 
1076 				usb_callout_init_mtx(&xfer->timeout_handle,
1077 				    &udev->bus->bus_mtx, 0);
1078 			} else {
1079 				/*
1080 				 * Setup a dummy xfer, hence we are
1081 				 * writing to the "usb_xfer"
1082 				 * structure pointed to by "xfer"
1083 				 * before we have allocated any
1084 				 * memory:
1085 				 */
1086 				xfer = &udev->scratch.xfer_setup[0].dummy;
1087 				memset(xfer, 0, sizeof(*xfer));
1088 				refcount++;
1089 			}
1090 
1091 			/* set transfer endpoint pointer */
1092 			xfer->endpoint = ep;
1093 
1094 			/* set transfer stream ID */
1095 			xfer->stream_id = setup->stream_id;
1096 
1097 			parm->size[0] += sizeof(xfer[0]);
1098 			parm->methods = xfer->endpoint->methods;
1099 			parm->curr_xfer = xfer;
1100 
1101 			/*
1102 			 * Call the Host or Device controller transfer
1103 			 * setup routine:
1104 			 */
1105 			(udev->bus->methods->xfer_setup) (parm);
1106 
1107 			/* check for error */
1108 			if (parm->err)
1109 				goto done;
1110 
1111 			if (buf) {
1112 				/*
1113 				 * Increment the endpoint refcount. This
1114 				 * basically prevents setting a new
1115 				 * configuration and alternate setting
1116 				 * when USB transfers are in use on
1117 				 * the given interface. Search the USB
1118 				 * code for "endpoint->refcount_alloc" if you
1119 				 * want more information.
1120 				 */
1121 				USB_BUS_LOCK(info->bus);
1122 				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1123 					parm->err = USB_ERR_INVAL;
1124 
1125 				xfer->endpoint->refcount_alloc++;
1126 
1127 				if (xfer->endpoint->refcount_alloc == 0)
1128 					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1129 				USB_BUS_UNLOCK(info->bus);
1130 
1131 				/*
1132 				 * Whenever we set ppxfer[] then we
1133 				 * also need to increment the
1134 				 * "setup_refcount":
1135 				 */
1136 				info->setup_refcount++;
1137 
1138 				/*
1139 				 * Transfer is successfully setup and
1140 				 * can be used:
1141 				 */
1142 				ppxfer[n] = xfer;
1143 			}
1144 
1145 			/* check for error */
1146 			if (parm->err)
1147 				goto done;
1148 		}
1149 
1150 		if (buf != NULL || parm->err != 0)
1151 			goto done;
1152 
1153 		/* if no transfers, nothing to do */
1154 		if (refcount == 0)
1155 			goto done;
1156 
1157 		/* align data properly */
1158 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1159 
1160 		/* store offset temporarily */
1161 		parm->size[1] = parm->size[0];
1162 
1163 		/*
1164 		 * The number of DMA tags required depends on
1165 		 * the number of endpoints. The current estimate
1166 		 * for maximum number of DMA tags per endpoint
1167 		 * is three:
1168 		 * 1) for loading memory
1169 		 * 2) for allocating memory
1170 		 * 3) for fixing memory [UHCI]
1171 		 */
1172 		parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1173 
1174 		/*
1175 		 * DMA tags for QH, TD, Data and more.
1176 		 */
1177 		parm->dma_tag_max += 8;
1178 
1179 		parm->dma_tag_p += parm->dma_tag_max;
1180 
1181 		parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1182 		    ((uint8_t *)0);
1183 
1184 		/* align data properly */
1185 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1186 
1187 		/* store offset temporarily */
1188 		parm->size[3] = parm->size[0];
1189 
1190 		parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1191 		    ((uint8_t *)0);
1192 
1193 		/* align data properly */
1194 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1195 
1196 		/* store offset temporarily */
1197 		parm->size[4] = parm->size[0];
1198 
1199 		parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1200 		    ((uint8_t *)0);
1201 
1202 		/* store end offset temporarily */
1203 		parm->size[5] = parm->size[0];
1204 
1205 		parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1206 		    ((uint8_t *)0);
1207 
1208 		/* store end offset temporarily */
1209 
1210 		parm->size[2] = parm->size[0];
1211 
1212 		/* align data properly */
1213 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1214 
1215 		parm->size[6] = parm->size[0];
1216 
1217 		parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1218 		    ((uint8_t *)0);
1219 
1220 		/* align data properly */
1221 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1222 
1223 		/* allocate zeroed memory */
1224 		buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1225 
1226 		if (buf == NULL) {
1227 			parm->err = USB_ERR_NOMEM;
1228 			DPRINTFN(0, "cannot allocate memory block for "
1229 			    "configuration (%d bytes)\n",
1230 			    parm->size[0]);
1231 			goto done;
1232 		}
1233 		parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1234 		parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1235 		parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1236 		parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1237 		parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1238 	}
1239 
1240 done:
1241 	if (buf) {
1242 		if (info->setup_refcount == 0) {
1243 			/*
1244 			 * "usbd_transfer_unsetup_sub" will unlock
1245 			 * the bus mutex before returning !
1246 			 */
1247 			USB_BUS_LOCK(info->bus);
1248 
1249 			/* something went wrong */
1250 			usbd_transfer_unsetup_sub(info, 0);
1251 		}
1252 	}
1253 
1254 	/* check if any errors happened */
1255 	if (parm->err)
1256 		usbd_transfer_unsetup(ppxfer, n_setup);
1257 
1258 	error = parm->err;
1259 
1260 	if (do_unlock)
1261 		usbd_enum_unlock(udev);
1262 
1263 	return (error);
1264 }
1265 
1266 /*------------------------------------------------------------------------*
1267  *	usbd_transfer_unsetup_sub - factored out code
1268  *------------------------------------------------------------------------*/
1269 static void
1270 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1271 {
1272 #if USB_HAVE_BUSDMA
1273 	struct usb_page_cache *pc;
1274 #endif
1275 
1276 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1277 
1278 	/* wait for any outstanding DMA operations */
1279 
1280 	if (needs_delay) {
1281 		usb_timeout_t temp;
1282 		temp = usbd_get_dma_delay(info->udev);
1283 		if (temp != 0) {
1284 			usb_pause_mtx(&info->bus->bus_mtx,
1285 			    USB_MS_TO_TICKS(temp));
1286 		}
1287 	}
1288 
1289 	/* make sure that our done messages are not queued anywhere */
1290 	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1291 
1292 	USB_BUS_UNLOCK(info->bus);
1293 
1294 #if USB_HAVE_BUSDMA
1295 	/* free DMA'able memory, if any */
1296 	pc = info->dma_page_cache_start;
1297 	while (pc != info->dma_page_cache_end) {
1298 		usb_pc_free_mem(pc);
1299 		pc++;
1300 	}
1301 
1302 	/* free DMA maps in all "xfer->frbuffers" */
1303 	pc = info->xfer_page_cache_start;
1304 	while (pc != info->xfer_page_cache_end) {
1305 		usb_pc_dmamap_destroy(pc);
1306 		pc++;
1307 	}
1308 
1309 	/* free all DMA tags */
1310 	usb_dma_tag_unsetup(&info->dma_parent_tag);
1311 #endif
1312 
1313 	cv_destroy(&info->cv_drain);
1314 
1315 	/*
1316 	 * free the "memory_base" last, because the "info" structure is
1317 	 * contained within the "memory_base"!
1318 	 */
1319 	free(info->memory_base, M_USB);
1320 }
1321 
1322 /*------------------------------------------------------------------------*
1323  *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1324  *
1325  * NOTE: All USB transfers in progress will get called back passing
1326  * the error code "USB_ERR_CANCELLED" before this function
1327  * returns.
1328  *------------------------------------------------------------------------*/
1329 void
1330 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1331 {
1332 	struct usb_xfer *xfer;
1333 	struct usb_xfer_root *info;
1334 	uint8_t needs_delay = 0;
1335 
1336 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1337 	    "usbd_transfer_unsetup can sleep!");
1338 
1339 	while (n_setup--) {
1340 		xfer = pxfer[n_setup];
1341 
1342 		if (xfer == NULL)
1343 			continue;
1344 
1345 		info = xfer->xroot;
1346 
1347 		USB_XFER_LOCK(xfer);
1348 		USB_BUS_LOCK(info->bus);
1349 
1350 		/*
1351 		 * HINT: when you start/stop a transfer, it might be a
1352 		 * good idea to directly use the "pxfer[]" structure:
1353 		 *
1354 		 * usbd_transfer_start(sc->pxfer[0]);
1355 		 * usbd_transfer_stop(sc->pxfer[0]);
1356 		 *
1357 		 * That way, if your code has many parts that do not all
1358 		 * stop running under the same lock, in other words
1359 		 * "xfer_mtx", the usbd_transfer_start and
1360 		 * usbd_transfer_stop functions will simply return
1361 		 * when they detect a NULL pointer argument.
1362 		 *
1363 		 * To avoid any races we clear the "pxfer[]" pointer
1364 		 * while holding the private mutex of the driver:
1365 		 */
1366 		pxfer[n_setup] = NULL;
1367 
1368 		USB_BUS_UNLOCK(info->bus);
1369 		USB_XFER_UNLOCK(xfer);
1370 
1371 		usbd_transfer_drain(xfer);
1372 
1373 #if USB_HAVE_BUSDMA
1374 		if (xfer->flags_int.bdma_enable)
1375 			needs_delay = 1;
1376 #endif
1377 		/*
1378 		 * NOTE: default endpoint does not have an
1379 		 * interface, even if endpoint->iface_index == 0
1380 		 */
1381 		USB_BUS_LOCK(info->bus);
1382 		xfer->endpoint->refcount_alloc--;
1383 		USB_BUS_UNLOCK(info->bus);
1384 
1385 		usb_callout_drain(&xfer->timeout_handle);
1386 
1387 		USB_BUS_LOCK(info->bus);
1388 
1389 		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1390 		    "reference count\n"));
1391 
1392 		info->setup_refcount--;
1393 
1394 		if (info->setup_refcount == 0) {
1395 			usbd_transfer_unsetup_sub(info,
1396 			    needs_delay);
1397 		} else {
1398 			USB_BUS_UNLOCK(info->bus);
1399 		}
1400 	}
1401 }
1402 
1403 /*------------------------------------------------------------------------*
1404  *	usbd_control_transfer_init - factored out code
1405  *
1406  * In USB Device Mode we have to wait for the SETUP packet which
1407  * containst the "struct usb_device_request" structure, before we can
1408  * transfer any data. In USB Host Mode we already have the SETUP
1409  * packet at the moment the USB transfer is started. This leads us to
1410  * having to setup the USB transfer at two different places in
1411  * time. This function just contains factored out control transfer
1412  * initialisation code, so that we don't duplicate the code.
1413  *------------------------------------------------------------------------*/
1414 static void
1415 usbd_control_transfer_init(struct usb_xfer *xfer)
1416 {
1417 	struct usb_device_request req;
1418 
1419 	/* copy out the USB request header */
1420 
1421 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1422 
1423 	/* setup remainder */
1424 
1425 	xfer->flags_int.control_rem = UGETW(req.wLength);
1426 
1427 	/* copy direction to endpoint variable */
1428 
1429 	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1430 	xfer->endpointno |=
1431 	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1432 }
1433 
1434 /*------------------------------------------------------------------------*
1435  *	usbd_control_transfer_did_data
1436  *
1437  * This function returns non-zero if a control endpoint has
1438  * transferred the first DATA packet after the SETUP packet.
1439  * Else it returns zero.
1440  *------------------------------------------------------------------------*/
1441 static uint8_t
1442 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1443 {
1444 	struct usb_device_request req;
1445 
1446 	/* SETUP packet is not yet sent */
1447 	if (xfer->flags_int.control_hdr != 0)
1448 		return (0);
1449 
1450 	/* copy out the USB request header */
1451 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1452 
1453 	/* compare remainder to the initial value */
1454 	return (xfer->flags_int.control_rem != UGETW(req.wLength));
1455 }
1456 
1457 /*------------------------------------------------------------------------*
1458  *	usbd_setup_ctrl_transfer
1459  *
1460  * This function handles initialisation of control transfers. Control
1461  * transfers are special in that they can both transmit
1462  * and receive data.
1463  *
1464  * Return values:
1465  *    0: Success
1466  * Else: Failure
1467  *------------------------------------------------------------------------*/
1468 static int
1469 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1470 {
1471 	usb_frlength_t len;
1472 
1473 	/* Check for control endpoint stall */
1474 	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1475 		/* the control transfer is no longer active */
1476 		xfer->flags_int.control_stall = 1;
1477 		xfer->flags_int.control_act = 0;
1478 	} else {
1479 		/* don't stall control transfer by default */
1480 		xfer->flags_int.control_stall = 0;
1481 	}
1482 
1483 	/* Check for invalid number of frames */
1484 	if (xfer->nframes > 2) {
1485 		/*
1486 		 * If you need to split a control transfer, you
1487 		 * have to do one part at a time. Only with
1488 		 * non-control transfers you can do multiple
1489 		 * non-control transfers can you do multiple
1490 		 * parts at a time.
1491 		DPRINTFN(0, "Too many frames: %u\n",
1492 		    (unsigned int)xfer->nframes);
1493 		goto error;
1494 	}
1495 
1496 	/*
1497          * Check if there is a control
1498          * transfer in progress:
1499          */
1500 	if (xfer->flags_int.control_act) {
1501 
1502 		if (xfer->flags_int.control_hdr) {
1503 
1504 			/* clear send header flag */
1505 
1506 			xfer->flags_int.control_hdr = 0;
1507 
1508 			/* setup control transfer */
1509 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1510 				usbd_control_transfer_init(xfer);
1511 			}
1512 		}
1513 		/* get data length */
1514 
1515 		len = xfer->sumlen;
1516 
1517 	} else {
1518 
1519 		/* the size of the SETUP structure is hardcoded ! */
1520 
1521 		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1522 			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1523 			    xfer->frlengths[0], sizeof(struct
1524 			    usb_device_request));
1525 			goto error;
1526 		}
1527 		/* check USB mode */
1528 		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1529 
1530 			/* check number of frames */
1531 			if (xfer->nframes != 1) {
1532 				/*
1533 			         * We need to receive the setup
1534 			         * message first so that we know the
1535 			         * data direction!
1536 			         */
1537 				DPRINTF("Misconfigured transfer\n");
1538 				goto error;
1539 			}
1540 			/*
1541 			 * Set a dummy "control_rem" value.  This
1542 			 * variable will be overwritten later by a
1543 			 * call to "usbd_control_transfer_init()" !
1544 			 */
1545 			xfer->flags_int.control_rem = 0xFFFF;
1546 		} else {
1547 
1548 			/* setup "endpoint" and "control_rem" */
1549 
1550 			usbd_control_transfer_init(xfer);
1551 		}
1552 
1553 		/* set transfer-header flag */
1554 
1555 		xfer->flags_int.control_hdr = 1;
1556 
1557 		/* get data length */
1558 
1559 		len = (xfer->sumlen - sizeof(struct usb_device_request));
1560 	}
1561 
1562 	/* update did data flag */
1563 
1564 	xfer->flags_int.control_did_data =
1565 	    usbd_control_transfer_did_data(xfer);
1566 
1567 	/* check if there is a length mismatch */
1568 
1569 	if (len > xfer->flags_int.control_rem) {
1570 		DPRINTFN(0, "Length (%d) greater than "
1571 		    "remaining length (%d)\n", len,
1572 		    xfer->flags_int.control_rem);
1573 		goto error;
1574 	}
1575 	/* check if we are doing a short transfer */
1576 
1577 	if (xfer->flags.force_short_xfer) {
1578 		xfer->flags_int.control_rem = 0;
1579 	} else {
1580 		if ((len != xfer->max_data_length) &&
1581 		    (len != xfer->flags_int.control_rem) &&
1582 		    (xfer->nframes != 1)) {
1583 			DPRINTFN(0, "Short control transfer without "
1584 			    "force_short_xfer set\n");
1585 			goto error;
1586 		}
1587 		xfer->flags_int.control_rem -= len;
1588 	}
1589 
1590 	/* the status part is executed when "control_act" is 0 */
1591 
1592 	if ((xfer->flags_int.control_rem > 0) ||
1593 	    (xfer->flags.manual_status)) {
1594 		/* don't execute the STATUS stage yet */
1595 		xfer->flags_int.control_act = 1;
1596 
1597 		/* sanity check */
1598 		if ((!xfer->flags_int.control_hdr) &&
1599 		    (xfer->nframes == 1)) {
1600 			/*
1601 		         * This is not a valid operation!
1602 		         */
1603 			DPRINTFN(0, "Invalid parameter "
1604 			    "combination\n");
1605 			goto error;
1606 		}
1607 	} else {
1608 		/* time to execute the STATUS stage */
1609 		xfer->flags_int.control_act = 0;
1610 	}
1611 	return (0);			/* success */
1612 
1613 error:
1614 	return (1);			/* failure */
1615 }
1616 
1617 /*------------------------------------------------------------------------*
1618  *	usbd_transfer_submit - start USB hardware for the given transfer
1619  *
1620  * This function should only be called from the USB callback.
1621  *------------------------------------------------------------------------*/
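/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a transfer is normally (re-)submitted from the USB_ST_SETUP
 * state of its callback, for example:
 *
 *	static void
 *	my_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			... process the received data ...
 *			(fall through)
 *		case USB_ST_SETUP:
 *	tr_setup:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			if (error != USB_ERR_CANCELLED) {
 *				usbd_xfer_set_stall(xfer);
 *				goto tr_setup;
 *			}
 *			break;
 *		}
 *	}
 */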
1622 void
1623 usbd_transfer_submit(struct usb_xfer *xfer)
1624 {
1625 	struct usb_xfer_root *info;
1626 	struct usb_bus *bus;
1627 	usb_frcount_t x;
1628 
1629 	info = xfer->xroot;
1630 	bus = info->bus;
1631 
1632 	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1633 	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1634 	    "read" : "write");
1635 
1636 #ifdef USB_DEBUG
1637 	if (USB_DEBUG_VAR > 0) {
1638 		USB_BUS_LOCK(bus);
1639 
1640 		usb_dump_endpoint(xfer->endpoint);
1641 
1642 		USB_BUS_UNLOCK(bus);
1643 	}
1644 #endif
1645 
1646 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1647 	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1648 
1649 	/* Only open the USB transfer once! */
1650 	if (!xfer->flags_int.open) {
1651 		xfer->flags_int.open = 1;
1652 
1653 		DPRINTF("open\n");
1654 
1655 		USB_BUS_LOCK(bus);
1656 		(xfer->endpoint->methods->open) (xfer);
1657 		USB_BUS_UNLOCK(bus);
1658 	}
1659 	/* set "transferring" flag */
1660 	xfer->flags_int.transferring = 1;
1661 
1662 #if USB_HAVE_POWERD
1663 	/* increment power reference */
1664 	usbd_transfer_power_ref(xfer, 1);
1665 #endif
1666 	/*
1667 	 * Check if the transfer is waiting on a queue, most
1668 	 * frequently the "done_q":
1669 	 */
1670 	if (xfer->wait_queue) {
1671 		USB_BUS_LOCK(bus);
1672 		usbd_transfer_dequeue(xfer);
1673 		USB_BUS_UNLOCK(bus);
1674 	}
1675 	/* clear "did_dma_delay" flag */
1676 	xfer->flags_int.did_dma_delay = 0;
1677 
1678 	/* clear "did_close" flag */
1679 	xfer->flags_int.did_close = 0;
1680 
1681 #if USB_HAVE_BUSDMA
1682 	/* clear "bdma_setup" flag */
1683 	xfer->flags_int.bdma_setup = 0;
1684 #endif
1685 	/* by default we cannot cancel any USB transfer immediately */
1686 	xfer->flags_int.can_cancel_immed = 0;
1687 
1688 	/* clear lengths and frame counts by default */
1689 	xfer->sumlen = 0;
1690 	xfer->actlen = 0;
1691 	xfer->aframes = 0;
1692 
1693 	/* clear any previous errors */
1694 	xfer->error = 0;
1695 
1696 	/* Check if the device is still alive */
1697 	if (info->udev->state < USB_STATE_POWERED) {
1698 		USB_BUS_LOCK(bus);
1699 		/*
1700 		 * Must return cancelled error code else
1701 		 * device drivers can hang.
1702 		 */
1703 		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1704 		USB_BUS_UNLOCK(bus);
1705 		return;
1706 	}
1707 
1708 	/* sanity check */
1709 	if (xfer->nframes == 0) {
1710 		if (xfer->flags.stall_pipe) {
1711 			/*
1712 			 * Special case - want to stall without transferring
1713 			 * any data:
1714 			 */
1715 			DPRINTF("xfer=%p nframes=0: stall "
1716 			    "or clear stall!\n", xfer);
1717 			USB_BUS_LOCK(bus);
1718 			xfer->flags_int.can_cancel_immed = 1;
1719 			/* start the transfer */
1720 			usb_command_wrapper(&xfer->endpoint->
1721 			    endpoint_q[xfer->stream_id], xfer);
1722 			USB_BUS_UNLOCK(bus);
1723 			return;
1724 		}
1725 		USB_BUS_LOCK(bus);
1726 		usbd_transfer_done(xfer, USB_ERR_INVAL);
1727 		USB_BUS_UNLOCK(bus);
1728 		return;
1729 	}
1730 	/* compute some variables */
1731 
1732 	for (x = 0; x != xfer->nframes; x++) {
1733 		/* make a copy of the frlengths[] */
1734 		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1735 		/* compute total transfer length */
1736 		xfer->sumlen += xfer->frlengths[x];
1737 		if (xfer->sumlen < xfer->frlengths[x]) {
1738 			/* length wrapped around */
1739 			USB_BUS_LOCK(bus);
1740 			usbd_transfer_done(xfer, USB_ERR_INVAL);
1741 			USB_BUS_UNLOCK(bus);
1742 			return;
1743 		}
1744 	}
1745 
1746 	/* clear some internal flags */
1747 
1748 	xfer->flags_int.short_xfer_ok = 0;
1749 	xfer->flags_int.short_frames_ok = 0;
1750 
1751 	/* check if this is a control transfer */
1752 
1753 	if (xfer->flags_int.control_xfr) {
1754 
1755 		if (usbd_setup_ctrl_transfer(xfer)) {
1756 			USB_BUS_LOCK(bus);
1757 			usbd_transfer_done(xfer, USB_ERR_STALLED);
1758 			USB_BUS_UNLOCK(bus);
1759 			return;
1760 		}
1761 	}
1762 	/*
1763 	 * Setup filtered version of some transfer flags,
1764 	 * in case of data read direction
1765 	 */
1766 	if (USB_GET_DATA_ISREAD(xfer)) {
1767 
1768 		if (xfer->flags.short_frames_ok) {
1769 			xfer->flags_int.short_xfer_ok = 1;
1770 			xfer->flags_int.short_frames_ok = 1;
1771 		} else if (xfer->flags.short_xfer_ok) {
1772 			xfer->flags_int.short_xfer_ok = 1;
1773 
1774 			/* check for control transfer */
1775 			if (xfer->flags_int.control_xfr) {
1776 				/*
1777 				 * 1) Control transfers do not support
1778 				 * reception of multiple short USB
1779 				 * frames in host mode and device side
1780 				 * mode, with the exception of:
1781 				 *
1782 				 * 2) Due to sometimes buggy device
1783 				 * side firmware we need to do a
1784 				 * STATUS stage in case of short
1785 				 * control transfers in USB host mode.
1786 				 * The STATUS stage then becomes the
1787 				 * "alt_next" to the DATA stage.
1788 				 */
1789 				xfer->flags_int.short_frames_ok = 1;
1790 			}
1791 		}
1792 	}
1793 	/*
1794 	 * Check if BUS-DMA support is enabled and try to load virtual
1795 	 * buffers into DMA, if any:
1796 	 */
1797 #if USB_HAVE_BUSDMA
1798 	if (xfer->flags_int.bdma_enable) {
1799 		/* insert the USB transfer last in the BUS-DMA queue */
1800 		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1801 		return;
1802 	}
1803 #endif
1804 	/*
1805 	 * Enter the USB transfer into the Host Controller or
1806 	 * Device Controller schedule:
1807 	 */
1808 	usbd_pipe_enter(xfer);
1809 }
1810 
1811 /*------------------------------------------------------------------------*
1812  *	usbd_pipe_enter - factored out code
1813  *------------------------------------------------------------------------*/
1814 void
1815 usbd_pipe_enter(struct usb_xfer *xfer)
1816 {
1817 	struct usb_endpoint *ep;
1818 
1819 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1820 
1821 	USB_BUS_LOCK(xfer->xroot->bus);
1822 
1823 	ep = xfer->endpoint;
1824 
1825 	DPRINTF("enter\n");
1826 
1827 	/* the transfer can now be cancelled */
1828 	xfer->flags_int.can_cancel_immed = 1;
1829 
1830 	/* enter the transfer */
1831 	(ep->methods->enter) (xfer);
1832 
1833 	/* check for transfer error */
1834 	if (xfer->error) {
1835 		/* some error has happened */
1836 		usbd_transfer_done(xfer, 0);
1837 		USB_BUS_UNLOCK(xfer->xroot->bus);
1838 		return;
1839 	}
1840 
1841 	/* start the transfer */
1842 	usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1843 	USB_BUS_UNLOCK(xfer->xroot->bus);
1844 }
1845 
1846 /*------------------------------------------------------------------------*
1847  *	usbd_transfer_start - start a USB transfer
1848  *
1849  * NOTE: Calling this function more than once will only
1850  *       result in a single transfer start, until the USB transfer
1851  *       completes.
1852  *------------------------------------------------------------------------*/
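/*
 * Illustrative sketch (hypothetical driver code): the transfer is
 * started while holding the driver's private mutex, which is also the
 * "xfer_mtx" given to usbd_transfer_setup():
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_start(sc->sc_xfer[0]);
 *	mtx_unlock(&sc->sc_mtx);
 */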
1853 void
1854 usbd_transfer_start(struct usb_xfer *xfer)
1855 {
1856 	if (xfer == NULL) {
1857 		/* transfer is gone */
1858 		return;
1859 	}
1860 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1861 
1862 	/* mark the USB transfer started */
1863 
1864 	if (!xfer->flags_int.started) {
1865 		/* lock the BUS lock to avoid races updating flags_int */
1866 		USB_BUS_LOCK(xfer->xroot->bus);
1867 		xfer->flags_int.started = 1;
1868 		USB_BUS_UNLOCK(xfer->xroot->bus);
1869 	}
1870 	/* check if the USB transfer callback is already transferring */
1871 
1872 	if (xfer->flags_int.transferring) {
1873 		return;
1874 	}
1875 	USB_BUS_LOCK(xfer->xroot->bus);
1876 	/* call the USB transfer callback */
1877 	usbd_callback_ss_done_defer(xfer);
1878 	USB_BUS_UNLOCK(xfer->xroot->bus);
1879 }
1880 
1881 /*------------------------------------------------------------------------*
1882  *	usbd_transfer_stop - stop a USB transfer
1883  *
1884  * NOTE: Calling this function more than once will only
1885  *       result in a single transfer stop.
1886  * NOTE: When this function returns it is not safe to free nor
1887  *       reuse any DMA buffers. See "usbd_transfer_drain()".
1888  *------------------------------------------------------------------------*/
1889 void
1890 usbd_transfer_stop(struct usb_xfer *xfer)
1891 {
1892 	struct usb_endpoint *ep;
1893 
1894 	if (xfer == NULL) {
1895 		/* transfer is gone */
1896 		return;
1897 	}
1898 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1899 
1900 	/* check if the USB transfer was ever opened */
1901 
1902 	if (!xfer->flags_int.open) {
1903 		if (xfer->flags_int.started) {
1904 			/* nothing to do except clearing the "started" flag */
1905 			/* lock the BUS lock to avoid races updating flags_int */
1906 			USB_BUS_LOCK(xfer->xroot->bus);
1907 			xfer->flags_int.started = 0;
1908 			USB_BUS_UNLOCK(xfer->xroot->bus);
1909 		}
1910 		return;
1911 	}
1912 	/* try to stop the current USB transfer */
1913 
1914 	USB_BUS_LOCK(xfer->xroot->bus);
1915 	/* override any previous error */
1916 	xfer->error = USB_ERR_CANCELLED;
1917 
1918 	/*
1919 	 * Clear "open" and "started" when both private and USB lock
1920 	 * is locked so that we don't get a race updating "flags_int"
1921 	 */
1922 	xfer->flags_int.open = 0;
1923 	xfer->flags_int.started = 0;
1924 
1925 	/*
1926 	 * Check if we can cancel the USB transfer immediately.
1927 	 */
1928 	if (xfer->flags_int.transferring) {
1929 		if (xfer->flags_int.can_cancel_immed &&
1930 		    (!xfer->flags_int.did_close)) {
1931 			DPRINTF("close\n");
1932 			/*
1933 			 * The following will lead to an USB_ERR_CANCELLED
1934 			 * error code being passed to the USB callback.
1935 			 */
1936 			(xfer->endpoint->methods->close) (xfer);
1937 			/* only close once */
1938 			xfer->flags_int.did_close = 1;
1939 		} else {
1940 			/* need to wait for the next done callback */
1941 		}
1942 	} else {
1943 		DPRINTF("close\n");
1944 
1945 		/* close here and now */
1946 		(xfer->endpoint->methods->close) (xfer);
1947 
1948 		/*
1949 		 * Any additional DMA delay is done by
1950 		 * "usbd_transfer_unsetup()".
1951 		 */
1952 
1953 		/*
1954 		 * Special case. Check if we need to restart a blocked
1955 		 * endpoint.
1956 		 */
1957 		ep = xfer->endpoint;
1958 
1959 		/*
1960 		 * If the current USB transfer is completing we need
1961 		 * to start the next one:
1962 		 */
1963 		if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1964 			usb_command_wrapper(
1965 			    &ep->endpoint_q[xfer->stream_id], NULL);
1966 		}
1967 	}
1968 
1969 	USB_BUS_UNLOCK(xfer->xroot->bus);
1970 }
1971 
1972 /*------------------------------------------------------------------------*
1973  *	usbd_transfer_pending
1974  *
1975  * This function will check if an USB transfer is pending, which is a
1976  * little bit complicated!
1977  * Return values:
1978  * 0: Not pending
1979  * 1: Pending: The USB transfer will receive a callback in the future.
1980  *------------------------------------------------------------------------*/
1981 uint8_t
1982 usbd_transfer_pending(struct usb_xfer *xfer)
1983 {
1984 	struct usb_xfer_root *info;
1985 	struct usb_xfer_queue *pq;
1986 
1987 	if (xfer == NULL) {
1988 		/* transfer is gone */
1989 		return (0);
1990 	}
1991 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1992 
1993 	if (xfer->flags_int.transferring) {
1994 		/* trivial case */
1995 		return (1);
1996 	}
1997 	USB_BUS_LOCK(xfer->xroot->bus);
1998 	if (xfer->wait_queue) {
1999 		/* we are waiting on a queue somewhere */
2000 		USB_BUS_UNLOCK(xfer->xroot->bus);
2001 		return (1);
2002 	}
2003 	info = xfer->xroot;
2004 	pq = &info->done_q;
2005 
2006 	if (pq->curr == xfer) {
2007 		/* we are currently scheduled for callback */
2008 		USB_BUS_UNLOCK(xfer->xroot->bus);
2009 		return (1);
2010 	}
2011 	/* we are not pending */
2012 	USB_BUS_UNLOCK(xfer->xroot->bus);
2013 	return (0);
2014 }
2015 
2016 /*------------------------------------------------------------------------*
2017  *	usbd_transfer_drain
2018  *
2019  * This function will stop the USB transfer and wait for any
2020  * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2021  * are loaded into DMA can safely be freed or reused after this
2022  * function has returned.
2023  *------------------------------------------------------------------------*/
2024 void
2025 usbd_transfer_drain(struct usb_xfer *xfer)
2026 {
2027 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2028 	    "usbd_transfer_drain can sleep!");
2029 
2030 	if (xfer == NULL) {
2031 		/* transfer is gone */
2032 		return;
2033 	}
2034 	if (xfer->xroot->xfer_mtx != &Giant) {
2035 		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2036 	}
2037 	USB_XFER_LOCK(xfer);
2038 
2039 	usbd_transfer_stop(xfer);
2040 
2041 	while (usbd_transfer_pending(xfer) ||
2042 	    xfer->flags_int.doing_callback) {
2043 
2044 		/*
2045 		 * It is allowed that the callback can drop its
2046 		 * transfer mutex. In that case checking only
2047 		 * "usbd_transfer_pending()" is not enough to tell if
2048 		 * the USB transfer is fully drained. We also need to
2049 		 * check the internal "doing_callback" flag.
2050 		 */
2051 		xfer->flags_int.draining = 1;
2052 
2053 		/*
2054 		 * Wait until the current outstanding USB
2055 		 * transfer is complete !
2056 		 */
2057 		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2058 	}
2059 	USB_XFER_UNLOCK(xfer);
2060 }
2061 
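/*
 * A hedged teardown sketch for "usbd_transfer_drain()"; "sc" and its
 * fields are hypothetical driver state. The function stops the
 * transfer itself and may sleep, so the transfer mutex must not be
 * held and it must not be called from the transfer callback:
 *
 *	usbd_transfer_drain(sc->sc_xfer[MY_BULK_RD]);
 *
 * When it returns, buffers loaded into DMA can safely be freed or
 * reused.
 */
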
2062 struct usb_page_cache *
2063 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2064 {
2065 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2066 
2067 	return (&xfer->frbuffers[frindex]);
2068 }
2069 
2070 void *
2071 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2072 {
2073 	struct usb_page_search page_info;
2074 
2075 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2076 
2077 	usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2078 	return (page_info.buffer);
2079 }
2080 
2081 /*------------------------------------------------------------------------*
2082  *	usbd_xfer_get_fps_shift
2083  *
2084  * The following function is only useful for isochronous transfers. It
2085  * returns how many times the frame execution rate has been shifted
2086  * down.
2087  *
2088  * Return value:
2089  * Success: 0..3
2090  * Failure: 0
2091  *------------------------------------------------------------------------*/
2092 uint8_t
2093 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2094 {
2095 	return (xfer->fps_shift);
2096 }
2097 
2098 usb_frlength_t
2099 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2100 {
2101 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2102 
2103 	return (xfer->frlengths[frindex]);
2104 }
2105 
2106 /*------------------------------------------------------------------------*
2107  *	usbd_xfer_set_frame_data
2108  *
2109  * This function sets the pointer of the buffer that should
2110  * be loaded directly into DMA for the given USB frame. Passing "ptr"
2111  * equal to NULL while the corresponding "frlength" is greater
2112  * than zero gives undefined results!
2113  *------------------------------------------------------------------------*/
2114 void
2115 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2116     void *ptr, usb_frlength_t len)
2117 {
2118 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2119 
2120 	/* set virtual address to load and length */
2121 	xfer->frbuffers[frindex].buffer = ptr;
2122 	usbd_xfer_set_frame_len(xfer, frindex, len);
2123 }
2124 
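/*
 * A hedged sketch of loading an external buffer for DMA from a
 * transfer callback; "sc->sc_buf" and "sc->sc_len" are hypothetical
 * driver state, and this is typically used together with the
 * "ext_buffer" configuration flag:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_buf, sc->sc_len);
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */
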
2125 void
2126 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2127     void **ptr, int *len)
2128 {
2129 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2130 
2131 	if (ptr != NULL)
2132 		*ptr = xfer->frbuffers[frindex].buffer;
2133 	if (len != NULL)
2134 		*len = xfer->frlengths[frindex];
2135 }
2136 
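/*
 * A hedged sketch of inspecting a completed frame from a callback,
 * for transfers that own their internal DMA buffer rather than an
 * external one:
 *
 *	void *buf;
 *	int len;
 *
 *	usbd_xfer_frame_data(xfer, 0, &buf, &len);
 *	// "buf" now points at frame 0 and "len" is its current length
 */
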
2137 /*------------------------------------------------------------------------*
2138  *	usbd_xfer_old_frame_length
2139  *
2140  * This function returns the framelength of the given frame at the
2141  * time the transfer was submitted. This function can be used to
2142  * compute the starting data pointer of the next isochronous frame
2143  * when an isochronous transfer has completed.
2144  *------------------------------------------------------------------------*/
2145 usb_frlength_t
2146 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2147 {
2148 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2149 
2150 	return (xfer->frlengths[frindex + xfer->max_frame_count]);
2151 }
2152 
2153 void
2154 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2155     int *nframes)
2156 {
2157 	if (actlen != NULL)
2158 		*actlen = xfer->actlen;
2159 	if (sumlen != NULL)
2160 		*sumlen = xfer->sumlen;
2161 	if (aframes != NULL)
2162 		*aframes = xfer->aframes;
2163 	if (nframes != NULL)
2164 		*nframes = xfer->nframes;
2165 }
2166 
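/*
 * A hedged sketch of reading the completion status from a transfer
 * callback in the USB_ST_TRANSFERRED state; "actlen" is the number
 * of bytes actually transferred across the completed frames:
 *
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *	DPRINTF("completed %d bytes\n", actlen);
 */
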
2167 /*------------------------------------------------------------------------*
2168  *	usbd_xfer_set_frame_offset
2169  *
2170  * This function sets the frame data buffer offset relative to the beginning
2171  * of the USB DMA buffer allocated for this USB transfer.
2172  *------------------------------------------------------------------------*/
2173 void
2174 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2175     usb_frcount_t frindex)
2176 {
2177 	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2178 	    "when the USB buffer is external\n"));
2179 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2180 
2181 	/* set virtual address to load */
2182 	xfer->frbuffers[frindex].buffer =
2183 	    USB_ADD_BYTES(xfer->local_buffer, offset);
2184 }
2185 
2186 void
2187 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2188 {
2189 	xfer->interval = i;
2190 }
2191 
2192 void
2193 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2194 {
2195 	xfer->timeout = t;
2196 }
2197 
2198 void
2199 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2200 {
2201 	xfer->nframes = n;
2202 }
2203 
2204 usb_frcount_t
2205 usbd_xfer_max_frames(struct usb_xfer *xfer)
2206 {
2207 	return (xfer->max_frame_count);
2208 }
2209 
2210 usb_frlength_t
2211 usbd_xfer_max_len(struct usb_xfer *xfer)
2212 {
2213 	return (xfer->max_data_length);
2214 }
2215 
2216 usb_frlength_t
2217 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2218 {
2219 	return (xfer->max_frame_size);
2220 }
2221 
2222 void
2223 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2224     usb_frlength_t len)
2225 {
2226 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2227 
2228 	xfer->frlengths[frindex] = len;
2229 }
2230 
2231 /*------------------------------------------------------------------------*
2232  *	usb_callback_proc - factored out code
2233  *
2234  * This function performs USB callbacks.
2235  *------------------------------------------------------------------------*/
2236 static void
2237 usb_callback_proc(struct usb_proc_msg *_pm)
2238 {
2239 	struct usb_done_msg *pm = (void *)_pm;
2240 	struct usb_xfer_root *info = pm->xroot;
2241 
2242 	/* Change locking order */
2243 	USB_BUS_UNLOCK(info->bus);
2244 
2245 	/*
2246 	 * We exploit the fact that the mutex is the same for all
2247 	 * callbacks that will be called from this thread:
2248 	 */
2249 	mtx_lock(info->xfer_mtx);
2250 	USB_BUS_LOCK(info->bus);
2251 
2252 	/* Continue where we lost track */
2253 	usb_command_wrapper(&info->done_q,
2254 	    info->done_q.curr);
2255 
2256 	mtx_unlock(info->xfer_mtx);
2257 }
2258 
2259 /*------------------------------------------------------------------------*
2260  *	usbd_callback_ss_done_defer
2261  *
2262  * This function will defer the start, stop and done callback to the
2263  * correct thread.
2264  *------------------------------------------------------------------------*/
2265 static void
2266 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2267 {
2268 	struct usb_xfer_root *info = xfer->xroot;
2269 	struct usb_xfer_queue *pq = &info->done_q;
2270 
2271 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2272 
2273 	if (pq->curr != xfer) {
2274 		usbd_transfer_enqueue(pq, xfer);
2275 	}
2276 	if (!pq->recurse_1) {
2277 
2278 		/*
2279 	         * We have to postpone the callback because we would
2280 	         * get a Lock Order Reversal, LOR, if we tried to
2281 	         * proceed !
2282 	         */
2283 		if (usb_proc_msignal(info->done_p,
2284 		    &info->done_m[0], &info->done_m[1])) {
2285 			/* ignore */
2286 		}
2287 	} else {
2288 		/* clear second recurse flag */
2289 		pq->recurse_2 = 0;
2290 	}
2291 	return;
2292 
2293 }
2294 
2295 /*------------------------------------------------------------------------*
2296  *	usbd_callback_wrapper
2297  *
2298  * This is a wrapper for USB callbacks. This wrapper does some
2299  * auto-magic things like figuring out if we can call the callback
2300  * directly from the current context or if we need to wake up the
2301  * interrupt process.
2302  *------------------------------------------------------------------------*/
2303 static void
2304 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2305 {
2306 	struct usb_xfer *xfer = pq->curr;
2307 	struct usb_xfer_root *info = xfer->xroot;
2308 
2309 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2310 	if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2311 		/*
2312 	       	 * Cases that end up here:
2313 		 *
2314 		 * 5) HW interrupt done callback or other source.
2315 		 */
2316 		DPRINTFN(3, "case 5\n");
2317 
2318 		/*
2319 	         * We have to postpone the callback because we would
2320 	         * get a Lock Order Reversal, LOR, if we tried to
2321 	         * proceed !
2322 	         */
2323 		if (usb_proc_msignal(info->done_p,
2324 		    &info->done_m[0], &info->done_m[1])) {
2325 			/* ignore */
2326 		}
2327 		return;
2328 	}
2329 	/*
2330 	 * Cases that end up here:
2331 	 *
2332 	 * 1) We are starting a transfer
2333 	 * 2) We are prematurely calling back a transfer
2334 	 * 3) We are stopping a transfer
2335 	 * 4) We are doing an ordinary callback
2336 	 */
2337 	DPRINTFN(3, "case 1-4\n");
2338 	/* get next USB transfer in the queue */
2339 	info->done_q.curr = NULL;
2340 
2341 	/* set flag in case of drain */
2342 	xfer->flags_int.doing_callback = 1;
2343 
2344 	USB_BUS_UNLOCK(info->bus);
2345 	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2346 
2347 	/* set correct USB state for callback */
2348 	if (!xfer->flags_int.transferring) {
2349 		xfer->usb_state = USB_ST_SETUP;
2350 		if (!xfer->flags_int.started) {
2351 			/* we got stopped before we even got started */
2352 			USB_BUS_LOCK(info->bus);
2353 			goto done;
2354 		}
2355 	} else {
2356 
2357 		if (usbd_callback_wrapper_sub(xfer)) {
2358 			/* the callback has been deferred */
2359 			USB_BUS_LOCK(info->bus);
2360 			goto done;
2361 		}
2362 #if USB_HAVE_POWERD
2363 		/* decrement power reference */
2364 		usbd_transfer_power_ref(xfer, -1);
2365 #endif
2366 		xfer->flags_int.transferring = 0;
2367 
2368 		if (xfer->error) {
2369 			xfer->usb_state = USB_ST_ERROR;
2370 		} else {
2371 			/* set transferred state */
2372 			xfer->usb_state = USB_ST_TRANSFERRED;
2373 #if USB_HAVE_BUSDMA
2374 			/* sync DMA memory, if any */
2375 			if (xfer->flags_int.bdma_enable &&
2376 			    (!xfer->flags_int.bdma_no_post_sync)) {
2377 				usb_bdma_post_sync(xfer);
2378 			}
2379 #endif
2380 		}
2381 	}
2382 
2383 #if USB_HAVE_PF
2384 	if (xfer->usb_state != USB_ST_SETUP)
2385 		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2386 #endif
2387 	/* call processing routine */
2388 	(xfer->callback) (xfer, xfer->error);
2389 
2390 	/* pickup the USB mutex again */
2391 	USB_BUS_LOCK(info->bus);
2392 
2393 	/*
2394 	 * Check if we got started after we got cancelled, but
2395 	 * before we managed to do the callback.
2396 	 */
2397 	if ((!xfer->flags_int.open) &&
2398 	    (xfer->flags_int.started) &&
2399 	    (xfer->usb_state == USB_ST_ERROR)) {
2400 		/* clear flag in case of drain */
2401 		xfer->flags_int.doing_callback = 0;
2402 		/* try to loop, but not recursively */
2403 		usb_command_wrapper(&info->done_q, xfer);
2404 		return;
2405 	}
2406 
2407 done:
2408 	/* clear flag in case of drain */
2409 	xfer->flags_int.doing_callback = 0;
2410 
2411 	/*
2412 	 * Check if we are draining.
2413 	 */
2414 	if (xfer->flags_int.draining &&
2415 	    (!xfer->flags_int.transferring)) {
2416 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2417 		xfer->flags_int.draining = 0;
2418 		cv_broadcast(&info->cv_drain);
2419 	}
2420 
2421 	/* do the next callback, if any */
2422 	usb_command_wrapper(&info->done_q,
2423 	    info->done_q.curr);
2424 }
2425 
2426 /*------------------------------------------------------------------------*
2427  *	usb_dma_delay_done_cb
2428  *
2429  * This function is called when the DMA delay has been executed, and
2430  * will make sure that the callback is called to complete the USB
2431  * transfer. This code path is usually only used when there is an USB
2432  * error like USB_ERR_CANCELLED.
2433  *------------------------------------------------------------------------*/
2434 void
2435 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2436 {
2437 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2438 
2439 	DPRINTFN(3, "Completed %p\n", xfer);
2440 
2441 	/* queue callback for execution, again */
2442 	usbd_transfer_done(xfer, 0);
2443 }
2444 
2445 /*------------------------------------------------------------------------*
2446  *	usbd_transfer_dequeue
2447  *
2448  *  - This function is used to remove an USB transfer from a USB
2449  *  transfer queue.
2450  *
2451  *  - This function can be called multiple times in a row.
2452  *------------------------------------------------------------------------*/
2453 void
2454 usbd_transfer_dequeue(struct usb_xfer *xfer)
2455 {
2456 	struct usb_xfer_queue *pq;
2457 
2458 	pq = xfer->wait_queue;
2459 	if (pq) {
2460 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2461 		xfer->wait_queue = NULL;
2462 	}
2463 }
2464 
2465 /*------------------------------------------------------------------------*
2466  *	usbd_transfer_enqueue
2467  *
2468  *  - This function is used to insert an USB transfer into a USB
2469  *  transfer queue.
2470  *
2471  *  - This function can be called multiple times in a row.
2472  *------------------------------------------------------------------------*/
2473 void
2474 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2475 {
2476 	/*
2477 	 * Insert the USB transfer into the queue, if it is not
2478 	 * already on a USB transfer queue:
2479 	 */
2480 	if (xfer->wait_queue == NULL) {
2481 		xfer->wait_queue = pq;
2482 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2483 	}
2484 }
2485 
2486 /*------------------------------------------------------------------------*
2487  *	usbd_transfer_done
2488  *
2489  *  - This function is used to remove an USB transfer from the busdma,
2490  *  pipe or interrupt queue.
2491  *
2492  *  - This function is used to queue the USB transfer on the done
2493  *  queue.
2494  *
2495  *  - This function is used to stop any USB transfer timeouts.
2496  *------------------------------------------------------------------------*/
2497 void
2498 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2499 {
2500 	struct usb_xfer_root *info = xfer->xroot;
2501 
2502 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2503 
2504 	DPRINTF("err=%s\n", usbd_errstr(error));
2505 
2506 	/*
2507 	 * If we are not transferring then just return.
2508 	 * This can happen during transfer cancel.
2509 	 */
2510 	if (!xfer->flags_int.transferring) {
2511 		DPRINTF("not transferring\n");
2512 		/* end of control transfer, if any */
2513 		xfer->flags_int.control_act = 0;
2514 		return;
2515 	}
2516 	/* only set transfer error, if not already set */
2517 	if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2518 		xfer->error = error;
2519 
2520 	/* stop any callouts */
2521 	usb_callout_stop(&xfer->timeout_handle);
2522 
2523 	/*
2524 	 * If we are waiting on a queue, just remove the USB transfer
2525 	 * from the queue, if any. We should have the required locks
2526 	 * locked to do the remove when this function is called.
2527 	 */
2528 	usbd_transfer_dequeue(xfer);
2529 
2530 #if USB_HAVE_BUSDMA
2531 	if (mtx_owned(info->xfer_mtx)) {
2532 		struct usb_xfer_queue *pq;
2533 
2534 		/*
2535 		 * If the private USB lock is not locked, then we assume
2536 		 * that the BUS-DMA load stage has been passed:
2537 		 */
2538 		pq = &info->dma_q;
2539 
2540 		if (pq->curr == xfer) {
2541 			/* start the next BUS-DMA load, if any */
2542 			usb_command_wrapper(pq, NULL);
2543 		}
2544 	}
2545 #endif
2546 	/* keep some statistics */
2547 	if (xfer->error) {
2548 		info->bus->stats_err.uds_requests
2549 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2550 	} else {
2551 		info->bus->stats_ok.uds_requests
2552 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2553 	}
2554 
2555 	/* call the USB transfer callback */
2556 	usbd_callback_ss_done_defer(xfer);
2557 }
2558 
2559 /*------------------------------------------------------------------------*
2560  *	usbd_transfer_start_cb
2561  *
2562  * This function is called to start the USB transfer when
2563  * "xfer->interval" is greater than zero, and the endpoint type is
2564  * BULK or CONTROL.
2565  *------------------------------------------------------------------------*/
2566 static void
2567 usbd_transfer_start_cb(void *arg)
2568 {
2569 	struct usb_xfer *xfer = arg;
2570 	struct usb_endpoint *ep = xfer->endpoint;
2571 
2572 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2573 
2574 	DPRINTF("start\n");
2575 
2576 #if USB_HAVE_PF
2577 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2578 #endif
2579 
2580 	/* the transfer can now be cancelled */
2581 	xfer->flags_int.can_cancel_immed = 1;
2582 
2583 	/* start USB transfer, if no error */
2584 	if (xfer->error == 0)
2585 		(ep->methods->start) (xfer);
2586 
2587 	/* check for transfer error */
2588 	if (xfer->error) {
2589 		/* some error has happened */
2590 		usbd_transfer_done(xfer, 0);
2591 	}
2592 }
2593 
2594 /*------------------------------------------------------------------------*
2595  *	usbd_xfer_set_stall
2596  *
2597  * This function is used to set the stall flag outside the
2598  * callback. This function is NULL safe.
2599  *------------------------------------------------------------------------*/
2600 void
2601 usbd_xfer_set_stall(struct usb_xfer *xfer)
2602 {
2603 	if (xfer == NULL) {
2604 		/* tearing down */
2605 		return;
2606 	}
2607 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2608 
2609 	/* avoid any races by locking the USB mutex */
2610 	USB_BUS_LOCK(xfer->xroot->bus);
2611 	xfer->flags.stall_pipe = 1;
2612 	USB_BUS_UNLOCK(xfer->xroot->bus);
2613 }
2614 
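/*
 * A hedged sketch of the error-recovery pattern commonly found in
 * USB drivers: when the callback's "error" argument indicates a
 * failure other than cancellation, set the stall flag and restart so
 * that the stack clears the endpoint halt before the next attempt:
 *
 *	default:			// Error
 *		if (error != USB_ERR_CANCELLED) {
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */
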
2615 int
2616 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2617 {
2618 	return (xfer->endpoint->is_stalled);
2619 }
2620 
2621 /*------------------------------------------------------------------------*
2622  *	usbd_transfer_clear_stall
2623  *
2624  * This function is used to clear the stall flag outside the
2625  * callback. This function is NULL safe.
2626  *------------------------------------------------------------------------*/
2627 void
2628 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2629 {
2630 	if (xfer == NULL) {
2631 		/* tearing down */
2632 		return;
2633 	}
2634 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2635 
2636 	/* avoid any races by locking the USB mutex */
2637 	USB_BUS_LOCK(xfer->xroot->bus);
2638 
2639 	xfer->flags.stall_pipe = 0;
2640 
2641 	USB_BUS_UNLOCK(xfer->xroot->bus);
2642 }
2643 
2644 /*------------------------------------------------------------------------*
2645  *	usbd_pipe_start
2646  *
2647  * This function is used to add an USB transfer to the pipe transfer list.
2648  *------------------------------------------------------------------------*/
2649 void
2650 usbd_pipe_start(struct usb_xfer_queue *pq)
2651 {
2652 	struct usb_endpoint *ep;
2653 	struct usb_xfer *xfer;
2654 	uint8_t type;
2655 
2656 	xfer = pq->curr;
2657 	ep = xfer->endpoint;
2658 
2659 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2660 
2661 	/*
2662 	 * If the endpoint is already stalled we do nothing !
2663 	 */
2664 	if (ep->is_stalled) {
2665 		return;
2666 	}
2667 	/*
2668 	 * Check if we are supposed to stall the endpoint:
2669 	 */
2670 	if (xfer->flags.stall_pipe) {
2671 		struct usb_device *udev;
2672 		struct usb_xfer_root *info;
2673 
2674 		/* clear stall command */
2675 		xfer->flags.stall_pipe = 0;
2676 
2677 		/* get pointer to USB device */
2678 		info = xfer->xroot;
2679 		udev = info->udev;
2680 
2681 		/*
2682 		 * Only stall BULK and INTERRUPT endpoints.
2683 		 */
2684 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2685 		if ((type == UE_BULK) ||
2686 		    (type == UE_INTERRUPT)) {
2687 			uint8_t did_stall;
2688 
2689 			did_stall = 1;
2690 
2691 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2692 				(udev->bus->methods->set_stall) (
2693 				    udev, ep, &did_stall);
2694 			} else if (udev->ctrl_xfer[1]) {
2695 				info = udev->ctrl_xfer[1]->xroot;
2696 				usb_proc_msignal(
2697 				    USB_BUS_NON_GIANT_PROC(info->bus),
2698 				    &udev->cs_msg[0], &udev->cs_msg[1]);
2699 			} else {
2700 				/* should not happen */
2701 				DPRINTFN(0, "No stall handler\n");
2702 			}
2703 			/*
2704 			 * Check if we should stall. Some USB hardware
2705 			 * handles set- and clear-stall in hardware.
2706 			 */
2707 			if (did_stall) {
2708 				/*
2709 				 * The transfer will be continued when
2710 				 * the clear-stall control endpoint
2711 				 * message is received.
2712 				 */
2713 				ep->is_stalled = 1;
2714 				return;
2715 			}
2716 		} else if (type == UE_ISOCHRONOUS) {
2717 
2718 			/*
2719 			 * Make sure any FIFO overflow or other FIFO
2720 			 * error conditions go away by resetting the
2721 			 * endpoint FIFO through the clear stall
2722 			 * method.
2723 			 */
2724 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2725 				(udev->bus->methods->clear_stall) (udev, ep);
2726 			}
2727 		}
2728 	}
2729 	/* Set or clear stall complete - special case */
2730 	if (xfer->nframes == 0) {
2731 		/* we are complete */
2732 		xfer->aframes = 0;
2733 		usbd_transfer_done(xfer, 0);
2734 		return;
2735 	}
2736 	/*
2737 	 * Handled cases:
2738 	 *
2739 	 * 1) Start the first transfer queued.
2740 	 *
2741 	 * 2) Re-start the current USB transfer.
2742 	 */
2743 	/*
2744 	 * Check if there should be any
2745 	 * pre transfer start delay:
2746 	 */
2747 	if (xfer->interval > 0) {
2748 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2749 		if ((type == UE_BULK) ||
2750 		    (type == UE_CONTROL)) {
2751 			usbd_transfer_timeout_ms(xfer,
2752 			    &usbd_transfer_start_cb,
2753 			    xfer->interval);
2754 			return;
2755 		}
2756 	}
2757 	DPRINTF("start\n");
2758 
2759 #if USB_HAVE_PF
2760 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2761 #endif
2762 	/* the transfer can now be cancelled */
2763 	xfer->flags_int.can_cancel_immed = 1;
2764 
2765 	/* start USB transfer, if no error */
2766 	if (xfer->error == 0)
2767 		(ep->methods->start) (xfer);
2768 
2769 	/* check for transfer error */
2770 	if (xfer->error) {
2771 		/* some error has happened */
2772 		usbd_transfer_done(xfer, 0);
2773 	}
2774 }
2775 
2776 /*------------------------------------------------------------------------*
2777  *	usbd_transfer_timeout_ms
2778  *
2779  * This function is used to setup a timeout on the given USB
2780  * transfer. If the timeout has been deferred the callback given by
2781  * "cb" will get called after "ms" milliseconds.
2782  *------------------------------------------------------------------------*/
2783 void
2784 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2785     void (*cb) (void *arg), usb_timeout_t ms)
2786 {
2787 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2788 
2789 	/* defer delay */
2790 	usb_callout_reset(&xfer->timeout_handle,
2791 	    USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2792 }
2793 
2794 /*------------------------------------------------------------------------*
2795  *	usbd_callback_wrapper_sub
2796  *
2797  *  - This function will update variables in an USB transfer after
2798  *  the USB transfer is complete.
2799  *
2800  *  - This function is used to start the next USB transfer on the
2801  *  ep transfer queue, if any.
2802  *
2803  * NOTE: In some special cases the USB transfer will not be removed from
2804  * the pipe queue, but remain first. To enforce USB transfer removal call
2805  * this function passing the error code "USB_ERR_CANCELLED".
2806  *
2807  * Return values:
2808  * 0: Success.
2809  * Else: The callback has been deferred.
2810  *------------------------------------------------------------------------*/
2811 static uint8_t
2812 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2813 {
2814 	struct usb_endpoint *ep;
2815 	struct usb_bus *bus;
2816 	usb_frcount_t x;
2817 
2818 	bus = xfer->xroot->bus;
2819 
2820 	if ((!xfer->flags_int.open) &&
2821 	    (!xfer->flags_int.did_close)) {
2822 		DPRINTF("close\n");
2823 		USB_BUS_LOCK(bus);
2824 		(xfer->endpoint->methods->close) (xfer);
2825 		USB_BUS_UNLOCK(bus);
2826 		/* only close once */
2827 		xfer->flags_int.did_close = 1;
2828 		return (1);		/* wait for new callback */
2829 	}
2830 	/*
2831 	 * If we have a non-hardware induced error we
2832 	 * need to do the DMA delay!
2833 	 */
2834 	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2835 	    (xfer->error == USB_ERR_CANCELLED ||
2836 	    xfer->error == USB_ERR_TIMEOUT ||
2837 	    bus->methods->start_dma_delay != NULL)) {
2838 
2839 		usb_timeout_t temp;
2840 
2841 		/* only delay once */
2842 		xfer->flags_int.did_dma_delay = 1;
2843 
2844 		/* we can not cancel this delay */
2845 		xfer->flags_int.can_cancel_immed = 0;
2846 
2847 		temp = usbd_get_dma_delay(xfer->xroot->udev);
2848 
2849 		DPRINTFN(3, "DMA delay, %u ms, "
2850 		    "on %p\n", temp, xfer);
2851 
2852 		if (temp != 0) {
2853 			USB_BUS_LOCK(bus);
2854 			/*
2855 			 * Some hardware solutions have dedicated
2856 			 * events when it is safe to free DMA'ed
2857 			 * memory. For the other hardware platforms we
2858 			 * use a static delay.
2859 			 */
2860 			if (bus->methods->start_dma_delay != NULL) {
2861 				(bus->methods->start_dma_delay) (xfer);
2862 			} else {
2863 				usbd_transfer_timeout_ms(xfer,
2864 				    (void (*)(void *))&usb_dma_delay_done_cb,
2865 				    temp);
2866 			}
2867 			USB_BUS_UNLOCK(bus);
2868 			return (1);	/* wait for new callback */
2869 		}
2870 	}
2871 	/* check actual number of frames */
2872 	if (xfer->aframes > xfer->nframes) {
2873 		if (xfer->error == 0) {
2874 			panic("%s: actual number of frames, %d, is "
2875 			    "greater than initial number of frames, %d\n",
2876 			    __FUNCTION__, xfer->aframes, xfer->nframes);
2877 		} else {
2878 			/* just set some valid value */
2879 			xfer->aframes = xfer->nframes;
2880 		}
2881 	}
2882 	/* compute actual length */
2883 	xfer->actlen = 0;
2884 
2885 	for (x = 0; x != xfer->aframes; x++) {
2886 		xfer->actlen += xfer->frlengths[x];
2887 	}
2888 
2889 	/*
2890 	 * Frames that were not transferred get zero actual length in
2891 	 * case the USB device driver does not check the actual number
2892 	 * of frames transferred, "xfer->aframes":
2893 	 */
2894 	for (; x < xfer->nframes; x++) {
2895 		usbd_xfer_set_frame_len(xfer, x, 0);
2896 	}
2897 
2898 	/* check actual length */
2899 	if (xfer->actlen > xfer->sumlen) {
2900 		if (xfer->error == 0) {
2901 			panic("%s: actual length, %d, is greater than "
2902 			    "initial length, %d\n",
2903 			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2904 		} else {
2905 			/* just set some valid value */
2906 			xfer->actlen = xfer->sumlen;
2907 		}
2908 	}
2909 	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2910 	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2911 	    xfer->aframes, xfer->nframes);
2912 
2913 	if (xfer->error) {
2914 		/* end of control transfer, if any */
2915 		xfer->flags_int.control_act = 0;
2916 
2917 #if USB_HAVE_TT_SUPPORT
2918 		switch (xfer->error) {
2919 		case USB_ERR_NORMAL_COMPLETION:
2920 		case USB_ERR_SHORT_XFER:
2921 		case USB_ERR_STALLED:
2922 		case USB_ERR_CANCELLED:
2923 			/* nothing to do */
2924 			break;
2925 		default:
2926 			/* try to reset the TT, if any */
2927 			USB_BUS_LOCK(bus);
2928 			uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2929 			USB_BUS_UNLOCK(bus);
2930 			break;
2931 		}
2932 #endif
2933 		/* check if we should block the execution queue */
2934 		if ((xfer->error != USB_ERR_CANCELLED) &&
2935 		    (xfer->flags.pipe_bof)) {
2936 			DPRINTFN(2, "xfer=%p: Block On Failure "
2937 			    "on endpoint=%p\n", xfer, xfer->endpoint);
2938 			goto done;
2939 		}
2940 	} else {
2941 		/* check for short transfers */
2942 		if (xfer->actlen < xfer->sumlen) {
2943 
2944 			/* end of control transfer, if any */
2945 			xfer->flags_int.control_act = 0;
2946 
2947 			if (!xfer->flags_int.short_xfer_ok) {
2948 				xfer->error = USB_ERR_SHORT_XFER;
2949 				if (xfer->flags.pipe_bof) {
2950 					DPRINTFN(2, "xfer=%p: Block On Failure on "
2951 					    "Short Transfer on endpoint %p.\n",
2952 					    xfer, xfer->endpoint);
2953 					goto done;
2954 				}
2955 			}
2956 		} else {
2957 			/*
2958 			 * Check if we are in the middle of a
2959 			 * control transfer:
2960 			 */
2961 			if (xfer->flags_int.control_act) {
2962 				DPRINTFN(5, "xfer=%p: Control transfer "
2963 				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2964 				goto done;
2965 			}
2966 		}
2967 	}
2968 
2969 	ep = xfer->endpoint;
2970 
2971 	/*
2972 	 * If the current USB transfer is completing we need to start the
2973 	 * next one:
2974 	 */
2975 	USB_BUS_LOCK(bus);
2976 	if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2977 		usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2978 
2979 		if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
2980 		    TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
2981 			/* there is another USB transfer waiting */
2982 		} else {
2983 			/* this is the last USB transfer */
2984 			/* clear isochronous sync flag */
2985 			xfer->endpoint->is_synced = 0;
2986 		}
2987 	}
2988 	USB_BUS_UNLOCK(bus);
2989 done:
2990 	return (0);
2991 }
2992 
2993 /*------------------------------------------------------------------------*
2994  *	usb_command_wrapper
2995  *
2996  * This function is used to execute commands non-recursively on an USB
2997  * transfer.
2998  *------------------------------------------------------------------------*/
2999 void
3000 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3001 {
3002 	if (xfer) {
3003 		/*
3004 		 * If the transfer is not already processing,
3005 		 * queue it!
3006 		 */
3007 		if (pq->curr != xfer) {
3008 			usbd_transfer_enqueue(pq, xfer);
3009 			if (pq->curr != NULL) {
3010 				/* something is already processing */
3011 				DPRINTFN(6, "busy %p\n", pq->curr);
3012 				return;
3013 			}
3014 		}
3015 	} else {
3016 		/* Get next element in queue */
3017 		pq->curr = NULL;
3018 	}
3019 
3020 	if (!pq->recurse_1) {
3021 
3022 		do {
3023 
3024 			/* set both recurse flags */
3025 			pq->recurse_1 = 1;
3026 			pq->recurse_2 = 1;
3027 
3028 			if (pq->curr == NULL) {
3029 				xfer = TAILQ_FIRST(&pq->head);
3030 				if (xfer) {
3031 					TAILQ_REMOVE(&pq->head, xfer,
3032 					    wait_entry);
3033 					xfer->wait_queue = NULL;
3034 					pq->curr = xfer;
3035 				} else {
3036 					break;
3037 				}
3038 			}
3039 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3040 			(pq->command) (pq);
3041 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3042 
3043 		} while (!pq->recurse_2);
3044 
3045 		/* clear first recurse flag */
3046 		pq->recurse_1 = 0;
3047 
3048 	} else {
3049 		/* clear second recurse flag */
3050 		pq->recurse_2 = 0;
3051 	}
3052 }
3053 
3054 /*------------------------------------------------------------------------*
3055  *	usbd_ctrl_transfer_setup
3056  *
3057  * This function is used to setup the default USB control endpoint
3058  * transfer.
3059  *------------------------------------------------------------------------*/
3060 void
3061 usbd_ctrl_transfer_setup(struct usb_device *udev)
3062 {
3063 	struct usb_xfer *xfer;
3064 	uint8_t no_resetup;
3065 	uint8_t iface_index;
3066 
3067 	/* check for root HUB */
3068 	if (udev->parent_hub == NULL)
3069 		return;
3070 repeat:
3071 
3072 	xfer = udev->ctrl_xfer[0];
3073 	if (xfer) {
3074 		USB_XFER_LOCK(xfer);
3075 		no_resetup =
3076 		    ((xfer->address == udev->address) &&
3077 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3078 		    udev->ddesc.bMaxPacketSize));
3079 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3080 			if (no_resetup) {
3081 				/*
3082 				 * NOTE: checking "xfer->address" and
3083 				 * starting the USB transfer must be
3084 				 * atomic!
3085 				 */
3086 				usbd_transfer_start(xfer);
3087 			}
3088 		}
3089 		USB_XFER_UNLOCK(xfer);
3090 	} else {
3091 		no_resetup = 0;
3092 	}
3093 
3094 	if (no_resetup) {
3095 		/*
3096 	         * All parameters are exactly the same as before.
3097 	         * Just return.
3098 	         */
3099 		return;
3100 	}
3101 	/*
3102 	 * Update wMaxPacketSize for the default control endpoint:
3103 	 */
3104 	udev->ctrl_ep_desc.wMaxPacketSize[0] =
3105 	    udev->ddesc.bMaxPacketSize;
3106 
3107 	/*
3108 	 * Unsetup any existing USB transfer:
3109 	 */
3110 	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3111 
3112 	/*
3113 	 * Reset clear stall error counter.
3114 	 */
3115 	udev->clear_stall_errors = 0;
3116 
3117 	/*
3118 	 * Try to setup a new USB transfer for the
3119 	 * default control endpoint:
3120 	 */
3121 	iface_index = 0;
3122 	if (usbd_transfer_setup(udev, &iface_index,
3123 	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3124 	    &udev->device_mtx)) {
3125 		DPRINTFN(0, "could not setup default "
3126 		    "USB transfer\n");
3127 	} else {
3128 		goto repeat;
3129 	}
3130 }
3131 
3132 /*------------------------------------------------------------------------*
3133  *	usbd_clear_stall_locked - factored out code
3134  *
3135  * NOTE: the intention of this function is not to reset the hardware
3136  * data toggle.
3137  *------------------------------------------------------------------------*/
3138 void
3139 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3140 {
3141 	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3142 
3143 	/* check that we have a valid case */
3144 	if (udev->flags.usb_mode == USB_MODE_HOST &&
3145 	    udev->parent_hub != NULL &&
3146 	    udev->bus->methods->clear_stall != NULL &&
3147 	    ep->methods != NULL) {
3148 		(udev->bus->methods->clear_stall) (udev, ep);
3149 	}
3150 }
3151 
3152 /*------------------------------------------------------------------------*
3153  *	usbd_clear_data_toggle - factored out code
3154  *
3155  * NOTE: the intention of this function is not to reset the hardware
3156  * data toggle on the USB device side.
3157  *------------------------------------------------------------------------*/
3158 void
3159 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3160 {
3161 	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3162 
3163 	USB_BUS_LOCK(udev->bus);
3164 	ep->toggle_next = 0;
3165 	/* some hardware needs a callback to clear the data toggle */
3166 	usbd_clear_stall_locked(udev, ep);
3167 	USB_BUS_UNLOCK(udev->bus);
3168 }
3169 
3170 /*------------------------------------------------------------------------*
3171  *	usbd_clear_stall_callback - factored out clear stall callback
3172  *
3173  * Input parameters:
3174  *  xfer1: Clear Stall Control Transfer
3175  *  xfer2: Stalled USB Transfer
3176  *
3177  * This function is NULL safe.
3178  *
3179  * Return values:
3180  *   0: In progress
3181  *   Else: Finished
3182  *
3183  * Clear stall config example:
3184  *
3185  * static const struct usb_config my_clearstall =  {
3186  *	.type = UE_CONTROL,
3187  *	.endpoint = 0,
3188  *	.direction = UE_DIR_ANY,
3189  *	.interval = 50, //50 milliseconds
3190  *	.bufsize = sizeof(struct usb_device_request),
3191  *	.timeout = 1000, //1.000 seconds
3192  *	.callback = &my_clear_stall_callback, // **
3193  *	.usb_mode = USB_MODE_HOST,
3194  * };
3195  *
3196  * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3197  * passing the correct parameters.
3198  *------------------------------------------------------------------------*/
3199 uint8_t
3200 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3201     struct usb_xfer *xfer2)
3202 {
3203 	struct usb_device_request req;
3204 
3205 	if (xfer2 == NULL) {
3206 		/* looks like we are tearing down */
3207 		DPRINTF("NULL input parameter\n");
3208 		return (0);
3209 	}
3210 	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3211 	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3212 
3213 	switch (USB_GET_STATE(xfer1)) {
3214 	case USB_ST_SETUP:
3215 
3216 		/*
3217 		 * pre-clear the data toggle to DATA0 ("umass.c" and
3218 		 * "ata-usb.c" depend on this)
3219 		 */
3220 
3221 		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3222 
3223 		/* setup a clear-stall packet */
3224 
3225 		req.bmRequestType = UT_WRITE_ENDPOINT;
3226 		req.bRequest = UR_CLEAR_FEATURE;
3227 		USETW(req.wValue, UF_ENDPOINT_HALT);
3228 		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3229 		req.wIndex[1] = 0;
3230 		USETW(req.wLength, 0);
3231 
3232 		/*
3233 		 * "usbd_transfer_setup_sub()" will ensure that
3234 		 * we have sufficient room in the buffer for
3235 		 * the request structure!
3236 		 */
3237 
3238 		/* copy in the transfer */
3239 
3240 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3241 
3242 		/* set length */
3243 		xfer1->frlengths[0] = sizeof(req);
3244 		xfer1->nframes = 1;
3245 
3246 		usbd_transfer_submit(xfer1);
3247 		return (0);
3248 
3249 	case USB_ST_TRANSFERRED:
3250 		break;
3251 
3252 	default:			/* Error */
3253 		if (xfer1->error == USB_ERR_CANCELLED) {
3254 			return (0);
3255 		}
3256 		break;
3257 	}
3258 	return (1);			/* Clear Stall Finished */
3259 }
3260 
3261 /*------------------------------------------------------------------------*
3262  *	usbd_transfer_poll
3263  *
3264  * The following function gets called from the USB keyboard driver and
3265  * UMASS when the system has panicked.
3266  *
3267  * NOTE: It is currently not possible to resume normal operation on
3268  * the USB controller which has been polled, due to clearing of the
3269  * "up_dsleep" and "up_msleep" flags.
3270  *------------------------------------------------------------------------*/
3271 void
3272 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3273 {
3274 	struct usb_xfer *xfer;
3275 	struct usb_xfer_root *xroot;
3276 	struct usb_device *udev;
3277 	struct usb_proc_msg *pm;
3278 	uint16_t n;
3279 	uint16_t drop_bus;
3280 	uint16_t drop_xfer;
3281 
3282 	for (n = 0; n != max; n++) {
3283 		/* Extra checks to avoid panic */
3284 		xfer = ppxfer[n];
3285 		if (xfer == NULL)
3286 			continue;	/* no USB transfer */
3287 		xroot = xfer->xroot;
3288 		if (xroot == NULL)
3289 			continue;	/* no USB root */
3290 		udev = xroot->udev;
3291 		if (udev == NULL)
3292 			continue;	/* no USB device */
3293 		if (udev->bus == NULL)
3294 			continue;	/* no BUS structure */
3295 		if (udev->bus->methods == NULL)
3296 			continue;	/* no BUS methods */
3297 		if (udev->bus->methods->xfer_poll == NULL)
3298 			continue;	/* no poll method */
3299 
3300 		/* make sure that the BUS mutex is not locked */
3301 		drop_bus = 0;
3302 		while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3303 			mtx_unlock(&xroot->udev->bus->bus_mtx);
3304 			drop_bus++;
3305 		}
3306 
3307 		/* make sure that the transfer mutex is not locked */
3308 		drop_xfer = 0;
3309 		while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3310 			mtx_unlock(xroot->xfer_mtx);
3311 			drop_xfer++;
3312 		}
3313 
3314 		/* Make sure cv_signal() and cv_broadcast() is not called */
3315 		USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3316 		USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3317 		USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3318 		USB_BUS_NON_GIANT_PROC(udev->bus)->up_msleep = 0;
3319 
3320 		/* poll USB hardware */
3321 		(udev->bus->methods->xfer_poll) (udev->bus);
3322 
3323 		USB_BUS_LOCK(xroot->bus);
3324 
3325 		/* check for clear stall */
3326 		if (udev->ctrl_xfer[1] != NULL) {
3327 
3328 			/* poll clear stall start */
3329 			pm = &udev->cs_msg[0].hdr;
3330 			(pm->pm_callback) (pm);
3331 			/* poll clear stall done thread */
3332 			pm = &udev->ctrl_xfer[1]->
3333 			    xroot->done_m[0].hdr;
3334 			(pm->pm_callback) (pm);
3335 		}
3336 
3337 		/* poll done thread */
3338 		pm = &xroot->done_m[0].hdr;
3339 		(pm->pm_callback) (pm);
3340 
3341 		USB_BUS_UNLOCK(xroot->bus);
3342 
3343 		/* restore transfer mutex */
3344 		while (drop_xfer--)
3345 			mtx_lock(xroot->xfer_mtx);
3346 
3347 		/* restore BUS mutex */
3348 		while (drop_bus--)
3349 			mtx_lock(&xroot->udev->bus->bus_mtx);
3350 	}
3351 }
3352 
3353 static void
3354 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3355     uint8_t type, enum usb_dev_speed speed)
3356 {
3357 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3358 		[USB_SPEED_LOW] = 8,
3359 		[USB_SPEED_FULL] = 64,
3360 		[USB_SPEED_HIGH] = 1024,
3361 		[USB_SPEED_VARIABLE] = 1024,
3362 		[USB_SPEED_SUPER] = 1024,
3363 	};
3364 
3365 	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3366 		[USB_SPEED_LOW] = 0,	/* invalid */
3367 		[USB_SPEED_FULL] = 1023,
3368 		[USB_SPEED_HIGH] = 1024,
3369 		[USB_SPEED_VARIABLE] = 3584,
3370 		[USB_SPEED_SUPER] = 1024,
3371 	};
3372 
3373 	static const uint16_t control_min[USB_SPEED_MAX] = {
3374 		[USB_SPEED_LOW] = 8,
3375 		[USB_SPEED_FULL] = 8,
3376 		[USB_SPEED_HIGH] = 64,
3377 		[USB_SPEED_VARIABLE] = 512,
3378 		[USB_SPEED_SUPER] = 512,
3379 	};
3380 
3381 	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3382 		[USB_SPEED_LOW] = 8,
3383 		[USB_SPEED_FULL] = 8,
3384 		[USB_SPEED_HIGH] = 512,
3385 		[USB_SPEED_VARIABLE] = 512,
3386 		[USB_SPEED_SUPER] = 1024,
3387 	};
3388 
3389 	uint16_t temp;
3390 
3391 	memset(ptr, 0, sizeof(*ptr));
3392 
3393 	switch (type) {
3394 	case UE_INTERRUPT:
3395 		ptr->range.max = intr_range_max[speed];
3396 		break;
3397 	case UE_ISOCHRONOUS:
3398 		ptr->range.max = isoc_range_max[speed];
3399 		break;
3400 	default:
3401 		if (type == UE_BULK)
3402 			temp = bulk_min[speed];
3403 		else /* UE_CONTROL */
3404 			temp = control_min[speed];
3405 
3406 		/* default is fixed */
3407 		ptr->fixed[0] = temp;
3408 		ptr->fixed[1] = temp;
3409 		ptr->fixed[2] = temp;
3410 		ptr->fixed[3] = temp;
3411 
3412 		if (speed == USB_SPEED_FULL) {
3413 			/* multiple sizes */
3414 			ptr->fixed[1] = 16;
3415 			ptr->fixed[2] = 32;
3416 			ptr->fixed[3] = 64;
3417 		}
3418 		if ((speed == USB_SPEED_VARIABLE) &&
3419 		    (type == UE_BULK)) {
3420 			/* multiple sizes */
3421 			ptr->fixed[2] = 1024;
3422 			ptr->fixed[3] = 1536;
3423 		}
3424 		break;
3425 	}
3426 }
3427 
3428 void	*
3429 usbd_xfer_softc(struct usb_xfer *xfer)
3430 {
3431 	return (xfer->priv_sc);
3432 }
3433 
3434 void *
3435 usbd_xfer_get_priv(struct usb_xfer *xfer)
3436 {
3437 	return (xfer->priv_fifo);
3438 }
3439 
3440 void
3441 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3442 {
3443 	xfer->priv_fifo = ptr;
3444 }
3445 
3446 uint8_t
3447 usbd_xfer_state(struct usb_xfer *xfer)
3448 {
3449 	return (xfer->usb_state);
3450 }
3451 
3452 void
3453 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3454 {
3455 	switch (flag) {
3456 		case USB_FORCE_SHORT_XFER:
3457 			xfer->flags.force_short_xfer = 1;
3458 			break;
3459 		case USB_SHORT_XFER_OK:
3460 			xfer->flags.short_xfer_ok = 1;
3461 			break;
3462 		case USB_MULTI_SHORT_OK:
3463 			xfer->flags.short_frames_ok = 1;
3464 			break;
3465 		case USB_MANUAL_STATUS:
3466 			xfer->flags.manual_status = 1;
3467 			break;
3468 	}
3469 }
3470 
3471 void
3472 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3473 {
3474 	switch (flag) {
3475 		case USB_FORCE_SHORT_XFER:
3476 			xfer->flags.force_short_xfer = 0;
3477 			break;
3478 		case USB_SHORT_XFER_OK:
3479 			xfer->flags.short_xfer_ok = 0;
3480 			break;
3481 		case USB_MULTI_SHORT_OK:
3482 			xfer->flags.short_frames_ok = 0;
3483 			break;
3484 		case USB_MANUAL_STATUS:
3485 			xfer->flags.manual_status = 0;
3486 			break;
3487 	}
3488 }
3489 
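/*
 * A hedged sketch of toggling transfer flags at runtime, typically
 * done with the transfer mutex held and before the transfer is
 * (re)submitted:
 *
 *	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 *	usbd_xfer_clr_flag(xfer, USB_FORCE_SHORT_XFER);
 */
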
3490 /*
3491  * The following function returns the time, in milliseconds, at which
3492  * the isochronous transfer was completed by the hardware. The returned
3493  * value wraps around 65536 milliseconds.
3494  */
3495 uint16_t
3496 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3497 {
3498 	return (xfer->isoc_time_complete);
3499 }
3500 
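/*
 * A hedged sketch of measuring elapsed time with the wrapping
 * isochronous timestamp; "sc_last_ms" is a hypothetical driver
 * field, and the 16-bit subtraction handles the wrap at 65536
 * milliseconds automatically:
 *
 *	uint16_t delta;
 *
 *	delta = usbd_xfer_get_timestamp(xfer) - sc->sc_last_ms;
 *	sc->sc_last_ms += delta;
 */
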
3501 /*
3502  * The following function returns non-zero if the max packet size
3503  * field was clamped to a valid value. Else it returns zero.
3504  */
3505 uint8_t
3506 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3507 {
3508 	return (xfer->flags_int.maxp_was_clamped);
3509 }
3510