xref: /freebsd/sys/dev/usb/usb_transfer.c (revision 2a58b312b62f908ec92311d1bd8536dbaeb8e55b)
1 /* $FreeBSD$ */
2 /*-
3  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4  *
5  * Copyright (c) 2008-2021 Hans Petter Selasky. All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #ifdef USB_GLOBAL_INCLUDE_FILE
30 #include USB_GLOBAL_INCLUDE_FILE
31 #else
32 #include <sys/stdint.h>
33 #include <sys/stddef.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/module.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/condvar.h>
44 #include <sys/sysctl.h>
45 #include <sys/sx.h>
46 #include <sys/unistd.h>
47 #include <sys/callout.h>
48 #include <sys/malloc.h>
49 #include <sys/priv.h>
50 
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdi_util.h>
54 
55 #define	USB_DEBUG_VAR usb_debug
56 
57 #include <dev/usb/usb_core.h>
58 #include <dev/usb/usb_busdma.h>
59 #include <dev/usb/usb_process.h>
60 #include <dev/usb/usb_transfer.h>
61 #include <dev/usb/usb_device.h>
62 #include <dev/usb/usb_debug.h>
63 #include <dev/usb/usb_util.h>
64 
65 #include <dev/usb/usb_controller.h>
66 #include <dev/usb/usb_bus.h>
67 #include <dev/usb/usb_pf.h>
68 #endif			/* USB_GLOBAL_INCLUDE_FILE */
69 
70 struct usb_std_packet_size {
71 	struct {
72 		uint16_t min;		/* inclusive */
73 		uint16_t max;		/* inclusive */
74 	}	range;
75 
76 	uint16_t fixed[4];
77 };
78 
79 static usb_callback_t usb_request_callback;
80 
81 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
82 	/* This transfer is used for generic control endpoint transfers */
83 
84 	[0] = {
85 		.type = UE_CONTROL,
86 		.endpoint = 0x00,	/* Control endpoint */
87 		.direction = UE_DIR_ANY,
88 		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
89 		.flags = {.proxy_buffer = 1,},
90 		.callback = &usb_request_callback,
91 		.usb_mode = USB_MODE_DUAL,	/* both modes */
92 	},
93 
94 	/* This transfer is used for generic clear stall only */
95 
96 	[1] = {
97 		.type = UE_CONTROL,
98 		.endpoint = 0x00,	/* Control pipe */
99 		.direction = UE_DIR_ANY,
100 		.bufsize = sizeof(struct usb_device_request),
101 		.callback = &usb_do_clear_stall_callback,
102 		.timeout = 1000,	/* 1 second */
103 		.interval = 50,	/* 50ms */
104 		.usb_mode = USB_MODE_HOST,
105 	},
106 };
107 
108 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
109 	/* This transfer is used for generic control endpoint transfers */
110 
111 	[0] = {
112 		.type = UE_CONTROL,
113 		.endpoint = 0x00,	/* Control endpoint */
114 		.direction = UE_DIR_ANY,
115 		.bufsize = 65535,	/* bytes */
116 		.callback = &usb_request_callback,
117 		.usb_mode = USB_MODE_DUAL,	/* both modes */
118 	},
119 
120 	/* This transfer is used for generic clear stall only */
121 
122 	[1] = {
123 		.type = UE_CONTROL,
124 		.endpoint = 0x00,	/* Control pipe */
125 		.direction = UE_DIR_ANY,
126 		.bufsize = sizeof(struct usb_device_request),
127 		.callback = &usb_do_clear_stall_callback,
128 		.timeout = 1000,	/* 1 second */
129 		.interval = 50,	/* 50ms */
130 		.usb_mode = USB_MODE_HOST,
131 	},
132 };
133 
134 /* function prototypes */
135 
136 static void	usbd_update_max_frame_size(struct usb_xfer *);
137 static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
138 static void	usbd_control_transfer_init(struct usb_xfer *);
139 static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
140 static void	usb_callback_proc(struct usb_proc_msg *);
141 static void	usbd_callback_ss_done_defer(struct usb_xfer *);
142 static void	usbd_callback_wrapper(struct usb_xfer_queue *);
143 static void	usbd_transfer_start_cb(void *);
144 static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
145 static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
146 		    uint8_t type, enum usb_dev_speed speed);
147 
148 /*------------------------------------------------------------------------*
149  *	usb_request_callback
150  *------------------------------------------------------------------------*/
151 static void
152 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
153 {
154 	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
155 		usb_handle_request_callback(xfer, error);
156 	else
157 		usbd_do_request_callback(xfer, error);
158 }
159 
160 /*------------------------------------------------------------------------*
161  *	usbd_update_max_frame_size
162  *
163  * This function updates the maximum frame size, hence high speed USB
164  * can transfer multiple consecutive packets.
165  *------------------------------------------------------------------------*/
166 static void
167 usbd_update_max_frame_size(struct usb_xfer *xfer)
168 {
169 	/* compute maximum frame size */
170 	/* this computation should not overflow 16-bit */
171 	/* max = 15 * 1024 */
172 
173 	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
174 }
175 
176 /*------------------------------------------------------------------------*
177  *	usbd_get_dma_delay
178  *
179  * The following function is called when we need to
180  * synchronize with DMA hardware.
181  *
182  * Returns:
183  *    0: no DMA delay required
184  * Else: milliseconds of DMA delay
185  *------------------------------------------------------------------------*/
186 usb_timeout_t
187 usbd_get_dma_delay(struct usb_device *udev)
188 {
189 	const struct usb_bus_methods *mtod;
190 	uint32_t temp;
191 
192 	mtod = udev->bus->methods;
193 	temp = 0;
194 
195 	if (mtod->get_dma_delay) {
196 		(mtod->get_dma_delay) (udev, &temp);
197 		/*
198 		 * Round up and convert to milliseconds. Note that we use
199 		 * 1024 milliseconds per second. to save a division.
200 		 */
201 		temp += 0x3FF;
202 		temp /= 0x400;
203 	}
204 	return (temp);
205 }
206 
207 /*------------------------------------------------------------------------*
208  *	usbd_transfer_setup_sub_malloc
209  *
210  * This function will allocate one or more DMA'able memory chunks
211  * according to "size", "align" and "count" arguments. "ppc" is
212  * pointed to a linear array of USB page caches afterwards.
213  *
214  * If the "align" argument is equal to "1" a non-contiguous allocation
215  * can happen. Else if the "align" argument is greater than "1", the
216  * allocation will always be contiguous in memory.
217  *
218  * Returns:
219  *    0: Success
220  * Else: Failure
221  *------------------------------------------------------------------------*/
222 #if USB_HAVE_BUSDMA
223 uint8_t
224 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
225     struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
226     usb_size_t count)
227 {
228 	struct usb_page_cache *pc;
229 	struct usb_page *pg;
230 	void *buf;
231 	usb_size_t n_dma_pc;
232 	usb_size_t n_dma_pg;
233 	usb_size_t n_obj;
234 	usb_size_t x;
235 	usb_size_t y;
236 	usb_size_t r;
237 	usb_size_t z;
238 
239 	USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
240 	    align));
241 	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
242 
243 	if (count == 0) {
244 		return (0);		/* nothing to allocate */
245 	}
246 	/*
247 	 * Make sure that the size is aligned properly.
248 	 */
249 	size = -((-size) & (-align));
250 
251 	/*
252 	 * Try multi-allocation chunks to reduce the number of DMA
253 	 * allocations, hence DMA allocations are slow.
254 	 */
255 	if (align == 1) {
256 		/* special case - non-cached multi page DMA memory */
257 		n_dma_pc = count;
258 		n_dma_pg = (2 + (size / USB_PAGE_SIZE));
259 		n_obj = 1;
260 	} else if (size >= USB_PAGE_SIZE) {
261 		n_dma_pc = count;
262 		n_dma_pg = 1;
263 		n_obj = 1;
264 	} else {
265 		/* compute number of objects per page */
266 #ifdef USB_DMA_SINGLE_ALLOC
267 		n_obj = 1;
268 #else
269 		n_obj = (USB_PAGE_SIZE / size);
270 #endif
271 		/*
272 		 * Compute number of DMA chunks, rounded up
273 		 * to nearest one:
274 		 */
275 		n_dma_pc = howmany(count, n_obj);
276 		n_dma_pg = 1;
277 	}
278 
279 	/*
280 	 * DMA memory is allocated once, but mapped twice. That's why
281 	 * there is one list for auto-free and another list for
282 	 * non-auto-free which only holds the mapping and not the
283 	 * allocation.
284 	 */
285 	if (parm->buf == NULL) {
286 		/* reserve memory (auto-free) */
287 		parm->dma_page_ptr += n_dma_pc * n_dma_pg;
288 		parm->dma_page_cache_ptr += n_dma_pc;
289 
290 		/* reserve memory (no-auto-free) */
291 		parm->dma_page_ptr += count * n_dma_pg;
292 		parm->xfer_page_cache_ptr += count;
293 		return (0);
294 	}
295 	for (x = 0; x != n_dma_pc; x++) {
296 		/* need to initialize the page cache */
297 		parm->dma_page_cache_ptr[x].tag_parent =
298 		    &parm->curr_xfer->xroot->dma_parent_tag;
299 	}
300 	for (x = 0; x != count; x++) {
301 		/* need to initialize the page cache */
302 		parm->xfer_page_cache_ptr[x].tag_parent =
303 		    &parm->curr_xfer->xroot->dma_parent_tag;
304 	}
305 
306 	if (ppc != NULL) {
307 		if (n_obj != 1)
308 			*ppc = parm->xfer_page_cache_ptr;
309 		else
310 			*ppc = parm->dma_page_cache_ptr;
311 	}
312 	r = count;			/* set remainder count */
313 	z = n_obj * size;		/* set allocation size */
314 	pc = parm->xfer_page_cache_ptr;
315 	pg = parm->dma_page_ptr;
316 
317 	if (n_obj == 1) {
318 	    /*
319 	     * Avoid mapping memory twice if only a single object
320 	     * should be allocated per page cache:
321 	     */
322 	    for (x = 0; x != n_dma_pc; x++) {
323 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
324 		    pg, z, align)) {
325 			return (1);	/* failure */
326 		}
327 		/* Make room for one DMA page cache and "n_dma_pg" pages */
328 		parm->dma_page_cache_ptr++;
329 		pg += n_dma_pg;
330 	    }
331 	} else {
332 	    for (x = 0; x != n_dma_pc; x++) {
333 		if (r < n_obj) {
334 			/* compute last remainder */
335 			z = r * size;
336 			n_obj = r;
337 		}
338 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
339 		    pg, z, align)) {
340 			return (1);	/* failure */
341 		}
342 		/* Set beginning of current buffer */
343 		buf = parm->dma_page_cache_ptr->buffer;
344 		/* Make room for one DMA page cache and "n_dma_pg" pages */
345 		parm->dma_page_cache_ptr++;
346 		pg += n_dma_pg;
347 
348 		for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
349 			/* Load sub-chunk into DMA */
350 			if (usb_pc_dmamap_create(pc, size)) {
351 				return (1);	/* failure */
352 			}
353 			pc->buffer = USB_ADD_BYTES(buf, y * size);
354 			pc->page_start = pg;
355 
356 			USB_MTX_LOCK(pc->tag_parent->mtx);
357 			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
358 				USB_MTX_UNLOCK(pc->tag_parent->mtx);
359 				return (1);	/* failure */
360 			}
361 			USB_MTX_UNLOCK(pc->tag_parent->mtx);
362 		}
363 	    }
364 	}
365 
366 	parm->xfer_page_cache_ptr = pc;
367 	parm->dma_page_ptr = pg;
368 	return (0);
369 }
370 #endif
371 
372 /*------------------------------------------------------------------------*
373  *	usbd_get_max_frame_length
374  *
375  * This function returns the maximum single frame length as computed by
376  * usbd_transfer_setup(). It is useful when computing buffer sizes for
377  * devices having multiple alternate settings. The SuperSpeed endpoint
378  * companion pointer is allowed to be NULL.
379  *------------------------------------------------------------------------*/
380 uint32_t
381 usbd_get_max_frame_length(const struct usb_endpoint_descriptor *edesc,
382     const struct usb_endpoint_ss_comp_descriptor *ecomp,
383     enum usb_dev_speed speed)
384 {
385 	uint32_t max_packet_size;
386 	uint32_t max_packet_count;
387 	uint8_t type;
388 
389 	max_packet_size = UGETW(edesc->wMaxPacketSize);
390 	max_packet_count = 1;
391 	type = (edesc->bmAttributes & UE_XFERTYPE);
392 
393 	switch (speed) {
394 	case USB_SPEED_HIGH:
395 		switch (type) {
396 		case UE_ISOCHRONOUS:
397 		case UE_INTERRUPT:
398 			max_packet_count +=
399 			    (max_packet_size >> 11) & 3;
400 
401 			/* check for invalid max packet count */
402 			if (max_packet_count > 3)
403 				max_packet_count = 3;
404 			break;
405 		default:
406 			break;
407 		}
408 		max_packet_size &= 0x7FF;
409 		break;
410 	case USB_SPEED_SUPER:
411 		max_packet_count += (max_packet_size >> 11) & 3;
412 
413 		if (ecomp != NULL)
414 			max_packet_count += ecomp->bMaxBurst;
415 
416 		if ((max_packet_count == 0) ||
417 		    (max_packet_count > 16))
418 			max_packet_count = 16;
419 
420 		switch (type) {
421 		case UE_CONTROL:
422 			max_packet_count = 1;
423 			break;
424 		case UE_ISOCHRONOUS:
425 			if (ecomp != NULL) {
426 				uint8_t mult;
427 
428 				mult = UE_GET_SS_ISO_MULT(
429 				    ecomp->bmAttributes) + 1;
430 				if (mult > 3)
431 					mult = 3;
432 
433 				max_packet_count *= mult;
434 			}
435 			break;
436 		default:
437 			break;
438 		}
439 		max_packet_size &= 0x7FF;
440 		break;
441 	default:
442 		break;
443 	}
444 	return (max_packet_size * max_packet_count);
445 }
446 
447 /*------------------------------------------------------------------------*
448  *	usbd_transfer_setup_sub - transfer setup subroutine
449  *
450  * This function must be called from the "xfer_setup" callback of the
451  * USB Host or Device controller driver when setting up an USB
452  * transfer. This function will setup correct packet sizes, buffer
453  * sizes, flags and more, that are stored in the "usb_xfer"
454  * structure.
455  *------------------------------------------------------------------------*/
456 void
457 usbd_transfer_setup_sub(struct usb_setup_params *parm)
458 {
459 	enum {
460 		REQ_SIZE = 8,
461 		MIN_PKT = 8,
462 	};
463 	struct usb_xfer *xfer = parm->curr_xfer;
464 	const struct usb_config *setup = parm->curr_setup;
465 	struct usb_endpoint_ss_comp_descriptor *ecomp;
466 	struct usb_endpoint_descriptor *edesc;
467 	struct usb_std_packet_size std_size;
468 	usb_frcount_t n_frlengths;
469 	usb_frcount_t n_frbuffers;
470 	usb_frcount_t x;
471 	uint16_t maxp_old;
472 	uint8_t type;
473 	uint8_t zmps;
474 
475 	/*
476 	 * Sanity check. The following parameters must be initialized before
477 	 * calling this function.
478 	 */
479 	if ((parm->hc_max_packet_size == 0) ||
480 	    (parm->hc_max_packet_count == 0) ||
481 	    (parm->hc_max_frame_size == 0)) {
482 		parm->err = USB_ERR_INVAL;
483 		goto done;
484 	}
485 	edesc = xfer->endpoint->edesc;
486 	ecomp = xfer->endpoint->ecomp;
487 
488 	type = (edesc->bmAttributes & UE_XFERTYPE);
489 
490 	xfer->flags = setup->flags;
491 	xfer->nframes = setup->frames;
492 	xfer->timeout = setup->timeout;
493 	xfer->callback = setup->callback;
494 	xfer->interval = setup->interval;
495 	xfer->endpointno = edesc->bEndpointAddress;
496 	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
497 	xfer->max_packet_count = 1;
498 	/* make a shadow copy: */
499 	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
500 
501 	parm->bufsize = setup->bufsize;
502 
503 	switch (parm->speed) {
504 	case USB_SPEED_HIGH:
505 		switch (type) {
506 		case UE_ISOCHRONOUS:
507 		case UE_INTERRUPT:
508 			xfer->max_packet_count +=
509 			    (xfer->max_packet_size >> 11) & 3;
510 
511 			/* check for invalid max packet count */
512 			if (xfer->max_packet_count > 3)
513 				xfer->max_packet_count = 3;
514 			break;
515 		default:
516 			break;
517 		}
518 		xfer->max_packet_size &= 0x7FF;
519 		break;
520 	case USB_SPEED_SUPER:
521 		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
522 
523 		if (ecomp != NULL)
524 			xfer->max_packet_count += ecomp->bMaxBurst;
525 
526 		if ((xfer->max_packet_count == 0) ||
527 		    (xfer->max_packet_count > 16))
528 			xfer->max_packet_count = 16;
529 
530 		switch (type) {
531 		case UE_CONTROL:
532 			xfer->max_packet_count = 1;
533 			break;
534 		case UE_ISOCHRONOUS:
535 			if (ecomp != NULL) {
536 				uint8_t mult;
537 
538 				mult = UE_GET_SS_ISO_MULT(
539 				    ecomp->bmAttributes) + 1;
540 				if (mult > 3)
541 					mult = 3;
542 
543 				xfer->max_packet_count *= mult;
544 			}
545 			break;
546 		default:
547 			break;
548 		}
549 		xfer->max_packet_size &= 0x7FF;
550 		break;
551 	default:
552 		break;
553 	}
554 	/* range check "max_packet_count" */
555 
556 	if (xfer->max_packet_count > parm->hc_max_packet_count) {
557 		xfer->max_packet_count = parm->hc_max_packet_count;
558 	}
559 
560 	/* store max packet size value before filtering */
561 
562 	maxp_old = xfer->max_packet_size;
563 
564 	/* filter "wMaxPacketSize" according to HC capabilities */
565 
566 	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
567 	    (xfer->max_packet_size == 0)) {
568 		xfer->max_packet_size = parm->hc_max_packet_size;
569 	}
570 	/* filter "wMaxPacketSize" according to standard sizes */
571 
572 	usbd_get_std_packet_size(&std_size, type, parm->speed);
573 
574 	if (std_size.range.min || std_size.range.max) {
575 		if (xfer->max_packet_size < std_size.range.min) {
576 			xfer->max_packet_size = std_size.range.min;
577 		}
578 		if (xfer->max_packet_size > std_size.range.max) {
579 			xfer->max_packet_size = std_size.range.max;
580 		}
581 	} else {
582 		if (xfer->max_packet_size >= std_size.fixed[3]) {
583 			xfer->max_packet_size = std_size.fixed[3];
584 		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
585 			xfer->max_packet_size = std_size.fixed[2];
586 		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
587 			xfer->max_packet_size = std_size.fixed[1];
588 		} else {
589 			/* only one possibility left */
590 			xfer->max_packet_size = std_size.fixed[0];
591 		}
592 	}
593 
594 	/*
595 	 * Check if the max packet size was outside its allowed range
596 	 * and clamped to a valid value:
597 	 */
598 	if (maxp_old != xfer->max_packet_size)
599 		xfer->flags_int.maxp_was_clamped = 1;
600 
601 	/* compute "max_frame_size" */
602 
603 	usbd_update_max_frame_size(xfer);
604 
605 	/* check interrupt interval and transfer pre-delay */
606 
607 	if (type == UE_ISOCHRONOUS) {
608 		uint16_t frame_limit;
609 
610 		xfer->interval = 0;	/* not used, must be zero */
611 		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
612 
613 		if (xfer->timeout == 0) {
614 			/*
615 			 * set a default timeout in
616 			 * case something goes wrong!
617 			 */
618 			xfer->timeout = 1000 / 4;
619 		}
620 		switch (parm->speed) {
621 		case USB_SPEED_LOW:
622 		case USB_SPEED_FULL:
623 			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
624 			xfer->fps_shift = 0;
625 			break;
626 		default:
627 			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
628 			xfer->fps_shift = edesc->bInterval;
629 			if (xfer->fps_shift > 0)
630 				xfer->fps_shift--;
631 			if (xfer->fps_shift > 3)
632 				xfer->fps_shift = 3;
633 			if (xfer->flags.pre_scale_frames != 0)
634 				xfer->nframes <<= (3 - xfer->fps_shift);
635 			break;
636 		}
637 
638 		if (xfer->nframes > frame_limit) {
639 			/*
640 			 * this is not going to work
641 			 * cross hardware
642 			 */
643 			parm->err = USB_ERR_INVAL;
644 			goto done;
645 		}
646 		if (xfer->nframes == 0) {
647 			/*
648 			 * this is not a valid value
649 			 */
650 			parm->err = USB_ERR_ZERO_NFRAMES;
651 			goto done;
652 		}
653 	} else {
654 		/*
655 		 * If a value is specified use that else check the
656 		 * endpoint descriptor!
657 		 */
658 		if (type == UE_INTERRUPT) {
659 			uint32_t temp;
660 
661 			if (xfer->interval == 0) {
662 				xfer->interval = edesc->bInterval;
663 
664 				switch (parm->speed) {
665 				case USB_SPEED_LOW:
666 				case USB_SPEED_FULL:
667 					break;
668 				default:
669 					/* 125us -> 1ms */
670 					if (xfer->interval < 4)
671 						xfer->interval = 1;
672 					else if (xfer->interval > 16)
673 						xfer->interval = (1 << (16 - 4));
674 					else
675 						xfer->interval =
676 						    (1 << (xfer->interval - 4));
677 					break;
678 				}
679 			}
680 
681 			if (xfer->interval == 0) {
682 				/*
683 				 * One millisecond is the smallest
684 				 * interval we support:
685 				 */
686 				xfer->interval = 1;
687 			}
688 
689 			xfer->fps_shift = 0;
690 			temp = 1;
691 
692 			while ((temp != 0) && (temp < xfer->interval)) {
693 				xfer->fps_shift++;
694 				temp *= 2;
695 			}
696 
697 			switch (parm->speed) {
698 			case USB_SPEED_LOW:
699 			case USB_SPEED_FULL:
700 				break;
701 			default:
702 				xfer->fps_shift += 3;
703 				break;
704 			}
705 		}
706 	}
707 
708 	/*
709 	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
710 	 * to be equal to zero when setting up USB transfers, hence
711 	 * this leads to a lot of extra code in the USB kernel.
712 	 */
713 
714 	if ((xfer->max_frame_size == 0) ||
715 	    (xfer->max_packet_size == 0)) {
716 		zmps = 1;
717 
718 		if ((parm->bufsize <= MIN_PKT) &&
719 		    (type != UE_CONTROL) &&
720 		    (type != UE_BULK)) {
721 			/* workaround */
722 			xfer->max_packet_size = MIN_PKT;
723 			xfer->max_packet_count = 1;
724 			parm->bufsize = 0;	/* automatic setup length */
725 			usbd_update_max_frame_size(xfer);
726 
727 		} else {
728 			parm->err = USB_ERR_ZERO_MAXP;
729 			goto done;
730 		}
731 
732 	} else {
733 		zmps = 0;
734 	}
735 
736 	/*
737 	 * check if we should setup a default
738 	 * length:
739 	 */
740 
741 	if (parm->bufsize == 0) {
742 		parm->bufsize = xfer->max_frame_size;
743 
744 		if (type == UE_ISOCHRONOUS) {
745 			parm->bufsize *= xfer->nframes;
746 		}
747 	}
748 	/*
749 	 * check if we are about to setup a proxy
750 	 * type of buffer:
751 	 */
752 
753 	if (xfer->flags.proxy_buffer) {
754 		/* round bufsize up */
755 
756 		parm->bufsize += (xfer->max_frame_size - 1);
757 
758 		if (parm->bufsize < xfer->max_frame_size) {
759 			/* length wrapped around */
760 			parm->err = USB_ERR_INVAL;
761 			goto done;
762 		}
763 		/* subtract remainder */
764 
765 		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
766 
767 		/* add length of USB device request structure, if any */
768 
769 		if (type == UE_CONTROL) {
770 			parm->bufsize += REQ_SIZE;	/* SETUP message */
771 		}
772 	}
773 	xfer->max_data_length = parm->bufsize;
774 
775 	/* Setup "n_frlengths" and "n_frbuffers" */
776 
777 	if (type == UE_ISOCHRONOUS) {
778 		n_frlengths = xfer->nframes;
779 		n_frbuffers = 1;
780 	} else {
781 		if (type == UE_CONTROL) {
782 			xfer->flags_int.control_xfr = 1;
783 			if (xfer->nframes == 0) {
784 				if (parm->bufsize <= REQ_SIZE) {
785 					/*
786 					 * there will never be any data
787 					 * stage
788 					 */
789 					xfer->nframes = 1;
790 				} else {
791 					xfer->nframes = 2;
792 				}
793 			}
794 		} else {
795 			if (xfer->nframes == 0) {
796 				xfer->nframes = 1;
797 			}
798 		}
799 
800 		n_frlengths = xfer->nframes;
801 		n_frbuffers = xfer->nframes;
802 	}
803 
804 	/*
805 	 * check if we have room for the
806 	 * USB device request structure:
807 	 */
808 
809 	if (type == UE_CONTROL) {
810 		if (xfer->max_data_length < REQ_SIZE) {
811 			/* length wrapped around or too small bufsize */
812 			parm->err = USB_ERR_INVAL;
813 			goto done;
814 		}
815 		xfer->max_data_length -= REQ_SIZE;
816 	}
817 	/*
818 	 * Setup "frlengths" and shadow "frlengths" for keeping the
819 	 * initial frame lengths when a USB transfer is complete. This
820 	 * information is useful when computing isochronous offsets.
821 	 */
822 	xfer->frlengths = parm->xfer_length_ptr;
823 	parm->xfer_length_ptr += 2 * n_frlengths;
824 
825 	/* setup "frbuffers" */
826 	xfer->frbuffers = parm->xfer_page_cache_ptr;
827 	parm->xfer_page_cache_ptr += n_frbuffers;
828 
829 	/* initialize max frame count */
830 	xfer->max_frame_count = xfer->nframes;
831 
832 	/*
833 	 * check if we need to setup
834 	 * a local buffer:
835 	 */
836 
837 	if (!xfer->flags.ext_buffer) {
838 #if USB_HAVE_BUSDMA
839 		struct usb_page_search page_info;
840 		struct usb_page_cache *pc;
841 
842 		if (usbd_transfer_setup_sub_malloc(parm,
843 		    &pc, parm->bufsize, 1, 1)) {
844 			parm->err = USB_ERR_NOMEM;
845 		} else if (parm->buf != NULL) {
846 			usbd_get_page(pc, 0, &page_info);
847 
848 			xfer->local_buffer = page_info.buffer;
849 
850 			usbd_xfer_set_frame_offset(xfer, 0, 0);
851 
852 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
853 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
854 			}
855 		}
856 #else
857 		/* align data */
858 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
859 
860 		if (parm->buf != NULL) {
861 			xfer->local_buffer =
862 			    USB_ADD_BYTES(parm->buf, parm->size[0]);
863 
864 			usbd_xfer_set_frame_offset(xfer, 0, 0);
865 
866 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
867 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
868 			}
869 		}
870 		parm->size[0] += parm->bufsize;
871 
872 		/* align data again */
873 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
874 #endif
875 	}
876 	/*
877 	 * Compute maximum buffer size
878 	 */
879 
880 	if (parm->bufsize_max < parm->bufsize) {
881 		parm->bufsize_max = parm->bufsize;
882 	}
883 #if USB_HAVE_BUSDMA
884 	if (xfer->flags_int.bdma_enable) {
885 		/*
886 		 * Setup "dma_page_ptr".
887 		 *
888 		 * Proof for formula below:
889 		 *
890 		 * Assume there are three USB frames having length "a", "b" and
891 		 * "c". These USB frames will at maximum need "z"
892 		 * "usb_page" structures. "z" is given by:
893 		 *
894 		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
895 		 * ((c / USB_PAGE_SIZE) + 2);
896 		 *
897 		 * Constraining "a", "b" and "c" like this:
898 		 *
899 		 * (a + b + c) <= parm->bufsize
900 		 *
901 		 * We know that:
902 		 *
903 		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
904 		 *
905 		 * Here is the general formula:
906 		 */
907 		xfer->dma_page_ptr = parm->dma_page_ptr;
908 		parm->dma_page_ptr += (2 * n_frbuffers);
909 		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
910 	}
911 #endif
912 	if (zmps) {
913 		/* correct maximum data length */
914 		xfer->max_data_length = 0;
915 	}
916 	/* subtract USB frame remainder from "hc_max_frame_size" */
917 
918 	xfer->max_hc_frame_size =
919 	    (parm->hc_max_frame_size -
920 	    (parm->hc_max_frame_size % xfer->max_frame_size));
921 
922 	if (xfer->max_hc_frame_size == 0) {
923 		parm->err = USB_ERR_INVAL;
924 		goto done;
925 	}
926 
927 	/* initialize frame buffers */
928 
929 	if (parm->buf) {
930 		for (x = 0; x != n_frbuffers; x++) {
931 			xfer->frbuffers[x].tag_parent =
932 			    &xfer->xroot->dma_parent_tag;
933 #if USB_HAVE_BUSDMA
934 			if (xfer->flags_int.bdma_enable &&
935 			    (parm->bufsize_max > 0)) {
936 				if (usb_pc_dmamap_create(
937 				    xfer->frbuffers + x,
938 				    parm->bufsize_max)) {
939 					parm->err = USB_ERR_NOMEM;
940 					goto done;
941 				}
942 			}
943 #endif
944 		}
945 	}
946 done:
947 	if (parm->err) {
948 		/*
949 		 * Set some dummy values so that we avoid division by zero:
950 		 */
951 		xfer->max_hc_frame_size = 1;
952 		xfer->max_frame_size = 1;
953 		xfer->max_packet_size = 1;
954 		xfer->max_data_length = 0;
955 		xfer->nframes = 0;
956 		xfer->max_frame_count = 0;
957 	}
958 }
959 
960 static uint8_t
961 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
962     uint16_t n_setup)
963 {
964 	while (n_setup--) {
965 		uint8_t type = setup_start[n_setup].type;
966 		if (type == UE_BULK || type == UE_BULK_INTR ||
967 		    type == UE_TYPE_ANY)
968 			return (1);
969 	}
970 	return (0);
971 }
972 
973 /*------------------------------------------------------------------------*
974  *	usbd_transfer_setup - setup an array of USB transfers
975  *
976  * NOTE: You must always call "usbd_transfer_unsetup" after calling
977  * "usbd_transfer_setup" if success was returned.
978  *
979  * The idea is that the USB device driver should pre-allocate all its
980  * transfers by one call to this function.
981  *
982  * Return values:
983  *    0: Success
984  * Else: Failure
985  *------------------------------------------------------------------------*/
986 usb_error_t
987 usbd_transfer_setup(struct usb_device *udev,
988     const uint8_t *ifaces, struct usb_xfer **ppxfer,
989     const struct usb_config *setup_start, uint16_t n_setup,
990     void *priv_sc, struct mtx *xfer_mtx)
991 {
992 	const struct usb_config *setup_end = setup_start + n_setup;
993 	const struct usb_config *setup;
994 	struct usb_setup_params *parm;
995 	struct usb_endpoint *ep;
996 	struct usb_xfer_root *info;
997 	struct usb_xfer *xfer;
998 	void *buf = NULL;
999 	usb_error_t error = 0;
1000 	uint16_t n;
1001 	uint16_t refcount;
1002 	uint8_t do_unlock;
1003 
1004 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1005 	    "usbd_transfer_setup can sleep!");
1006 
1007 	/* do some checking first */
1008 
1009 	if (n_setup == 0) {
1010 		DPRINTFN(6, "setup array has zero length!\n");
1011 		return (USB_ERR_INVAL);
1012 	}
1013 	if (ifaces == NULL) {
1014 		DPRINTFN(6, "ifaces array is NULL!\n");
1015 		return (USB_ERR_INVAL);
1016 	}
1017 	if (xfer_mtx == NULL) {
1018 		DPRINTFN(6, "using global lock\n");
1019 		xfer_mtx = &Giant;
1020 	}
1021 
1022 	/* more sanity checks */
1023 
1024 	for (setup = setup_start, n = 0;
1025 	    setup != setup_end; setup++, n++) {
1026 		if (setup->bufsize == (usb_frlength_t)-1) {
1027 			error = USB_ERR_BAD_BUFSIZE;
1028 			DPRINTF("invalid bufsize\n");
1029 		}
1030 		if (setup->callback == NULL) {
1031 			error = USB_ERR_NO_CALLBACK;
1032 			DPRINTF("no callback\n");
1033 		}
1034 		ppxfer[n] = NULL;
1035 	}
1036 
1037 	if (error)
1038 		return (error);
1039 
1040 	/* Protect scratch area */
1041 	do_unlock = usbd_ctrl_lock(udev);
1042 
1043 	refcount = 0;
1044 	info = NULL;
1045 
1046 	parm = &udev->scratch.xfer_setup[0].parm;
1047 	memset(parm, 0, sizeof(*parm));
1048 
1049 	parm->udev = udev;
1050 	parm->speed = usbd_get_speed(udev);
1051 	parm->hc_max_packet_count = 1;
1052 
1053 	if (parm->speed >= USB_SPEED_MAX) {
1054 		parm->err = USB_ERR_INVAL;
1055 		goto done;
1056 	}
1057 	/* setup all transfers */
1058 
1059 	while (1) {
1060 		if (buf) {
1061 			/*
1062 			 * Initialize the "usb_xfer_root" structure,
1063 			 * which is common for all our USB transfers.
1064 			 */
1065 			info = USB_ADD_BYTES(buf, 0);
1066 
1067 			info->memory_base = buf;
1068 			info->memory_size = parm->size[0];
1069 
1070 #if USB_HAVE_BUSDMA
1071 			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
1072 			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
1073 #endif
1074 			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
1075 			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
1076 
1077 			cv_init(&info->cv_drain, "WDRAIN");
1078 
1079 			info->xfer_mtx = xfer_mtx;
1080 #if USB_HAVE_BUSDMA
1081 			usb_dma_tag_setup(&info->dma_parent_tag,
1082 			    parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
1083 			    xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
1084 			    parm->dma_tag_max);
1085 #endif
1086 
1087 			info->bus = udev->bus;
1088 			info->udev = udev;
1089 
1090 			TAILQ_INIT(&info->done_q.head);
1091 			info->done_q.command = &usbd_callback_wrapper;
1092 #if USB_HAVE_BUSDMA
1093 			TAILQ_INIT(&info->dma_q.head);
1094 			info->dma_q.command = &usb_bdma_work_loop;
1095 #endif
1096 			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1097 			info->done_m[0].xroot = info;
1098 			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1099 			info->done_m[1].xroot = info;
1100 
1101 			/*
1102 			 * In device side mode control endpoint
1103 			 * requests need to run from a separate
1104 			 * context, else there is a chance of
1105 			 * deadlock!
1106 			 */
1107 			if (setup_start == usb_control_ep_cfg ||
1108 			    setup_start == usb_control_ep_quirk_cfg)
1109 				info->done_p =
1110 				    USB_BUS_CONTROL_XFER_PROC(udev->bus);
1111 			else if (xfer_mtx == &Giant)
1112 				info->done_p =
1113 				    USB_BUS_GIANT_PROC(udev->bus);
1114 			else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1115 				info->done_p =
1116 				    USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1117 			else
1118 				info->done_p =
1119 				    USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1120 		}
1121 		/* reset sizes */
1122 
1123 		parm->size[0] = 0;
1124 		parm->buf = buf;
1125 		parm->size[0] += sizeof(info[0]);
1126 
1127 		for (setup = setup_start, n = 0;
1128 		    setup != setup_end; setup++, n++) {
1129 			/* skip USB transfers without callbacks: */
1130 			if (setup->callback == NULL) {
1131 				continue;
1132 			}
1133 			/* see if there is a matching endpoint */
1134 			ep = usbd_get_endpoint(udev,
1135 			    ifaces[setup->if_index], setup);
1136 
1137 			/*
1138 			 * Check that the USB PIPE is valid and that
1139 			 * the endpoint mode is proper.
1140 			 *
1141 			 * Make sure we don't allocate a streams
1142 			 * transfer when such a combination is not
1143 			 * valid.
1144 			 */
1145 			if ((ep == NULL) || (ep->methods == NULL) ||
1146 			    ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1147 			    (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1148 			    (setup->stream_id != 0 &&
1149 			    (setup->stream_id >= USB_MAX_EP_STREAMS ||
1150 			    (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1151 				if (setup->flags.no_pipe_ok)
1152 					continue;
1153 				if ((setup->usb_mode != USB_MODE_DUAL) &&
1154 				    (setup->usb_mode != udev->flags.usb_mode))
1155 					continue;
1156 				parm->err = USB_ERR_NO_PIPE;
1157 				goto done;
1158 			}
1159 
1160 			/* align data properly */
1161 			parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1162 
1163 			/* store current setup pointer */
1164 			parm->curr_setup = setup;
1165 
1166 			if (buf) {
1167 				/*
1168 				 * Common initialization of the
1169 				 * "usb_xfer" structure.
1170 				 */
1171 				xfer = USB_ADD_BYTES(buf, parm->size[0]);
1172 				xfer->address = udev->address;
1173 				xfer->priv_sc = priv_sc;
1174 				xfer->xroot = info;
1175 
1176 				usb_callout_init_mtx(&xfer->timeout_handle,
1177 				    &udev->bus->bus_mtx, 0);
1178 			} else {
1179 				/*
1180 				 * Setup a dummy xfer, hence we are
1181 				 * writing to the "usb_xfer"
1182 				 * structure pointed to by "xfer"
1183 				 * before we have allocated any
1184 				 * memory:
1185 				 */
1186 				xfer = &udev->scratch.xfer_setup[0].dummy;
1187 				memset(xfer, 0, sizeof(*xfer));
1188 				refcount++;
1189 			}
1190 
1191 			/* set transfer endpoint pointer */
1192 			xfer->endpoint = ep;
1193 
1194 			/* set transfer stream ID */
1195 			xfer->stream_id = setup->stream_id;
1196 
1197 			parm->size[0] += sizeof(xfer[0]);
1198 			parm->methods = xfer->endpoint->methods;
1199 			parm->curr_xfer = xfer;
1200 
1201 			/*
1202 			 * Call the Host or Device controller transfer
1203 			 * setup routine:
1204 			 */
1205 			(udev->bus->methods->xfer_setup) (parm);
1206 
1207 			/* check for error */
1208 			if (parm->err)
1209 				goto done;
1210 
1211 			if (buf) {
1212 				/*
1213 				 * Increment the endpoint refcount. This
1214 				 * basically prevents setting a new
1215 				 * configuration and alternate setting
1216 				 * when USB transfers are in use on
1217 				 * the given interface. Search the USB
1218 				 * code for "endpoint->refcount_alloc" if you
1219 				 * want more information.
1220 				 */
1221 				USB_BUS_LOCK(info->bus);
1222 				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1223 					parm->err = USB_ERR_INVAL;
1224 
1225 				xfer->endpoint->refcount_alloc++;
1226 
1227 				if (xfer->endpoint->refcount_alloc == 0)
1228 					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1229 				USB_BUS_UNLOCK(info->bus);
1230 
1231 				/*
1232 				 * Whenever we set ppxfer[] then we
1233 				 * also need to increment the
1234 				 * "setup_refcount":
1235 				 */
1236 				info->setup_refcount++;
1237 
1238 				/*
1239 				 * Transfer is successfully setup and
1240 				 * can be used:
1241 				 */
1242 				ppxfer[n] = xfer;
1243 			}
1244 
1245 			/* check for error */
1246 			if (parm->err)
1247 				goto done;
1248 		}
1249 
1250 		if (buf != NULL || parm->err != 0)
1251 			goto done;
1252 
1253 		/* if no transfers, nothing to do */
1254 		if (refcount == 0)
1255 			goto done;
1256 
1257 		/* align data properly */
1258 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1259 
1260 		/* store offset temporarily */
1261 		parm->size[1] = parm->size[0];
1262 
1263 		/*
1264 		 * The number of DMA tags required depends on
1265 		 * the number of endpoints. The current estimate
1266 		 * for maximum number of DMA tags per endpoint
1267 		 * is three:
1268 		 * 1) for loading memory
1269 		 * 2) for allocating memory
1270 		 * 3) for fixing memory [UHCI]
1271 		 */
1272 		parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1273 
1274 		/*
1275 		 * DMA tags for QH, TD, Data and more.
1276 		 */
1277 		parm->dma_tag_max += 8;
1278 
1279 		parm->dma_tag_p += parm->dma_tag_max;
1280 
1281 		parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1282 		    ((uint8_t *)0);
1283 
1284 		/* align data properly */
1285 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1286 
1287 		/* store offset temporarily */
1288 		parm->size[3] = parm->size[0];
1289 
1290 		parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1291 		    ((uint8_t *)0);
1292 
1293 		/* align data properly */
1294 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1295 
1296 		/* store offset temporarily */
1297 		parm->size[4] = parm->size[0];
1298 
1299 		parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1300 		    ((uint8_t *)0);
1301 
1302 		/* store end offset temporarily */
1303 		parm->size[5] = parm->size[0];
1304 
1305 		parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1306 		    ((uint8_t *)0);
1307 
1308 		/* store end offset temporarily */
1309 
1310 		parm->size[2] = parm->size[0];
1311 
1312 		/* align data properly */
1313 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1314 
1315 		parm->size[6] = parm->size[0];
1316 
1317 		parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1318 		    ((uint8_t *)0);
1319 
1320 		/* align data properly */
1321 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1322 
1323 		/* allocate zeroed memory */
1324 		buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1325 #if (USB_HAVE_MALLOC_WAITOK == 0)
1326 		if (buf == NULL) {
1327 			parm->err = USB_ERR_NOMEM;
1328 			DPRINTFN(0, "cannot allocate memory block for "
1329 			    "configuration (%d bytes)\n",
1330 			    parm->size[0]);
1331 			goto done;
1332                 }
1333 #endif
1334 		parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1335 		parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1336 		parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1337 		parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1338 		parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1339 	}
1340 
1341 done:
1342 	if (buf) {
1343 		if (info->setup_refcount == 0) {
1344 			/*
1345 			 * "usbd_transfer_unsetup_sub" will unlock
1346 			 * the bus mutex before returning !
1347 			 */
1348 			USB_BUS_LOCK(info->bus);
1349 
1350 			/* something went wrong */
1351 			usbd_transfer_unsetup_sub(info, 0);
1352 		}
1353 	}
1354 
1355 	/* check if any errors happened */
1356 	if (parm->err)
1357 		usbd_transfer_unsetup(ppxfer, n_setup);
1358 
1359 	error = parm->err;
1360 
1361 	if (do_unlock)
1362 		usbd_ctrl_unlock(udev);
1363 
1364 	return (error);
1365 }
1366 
1367 /*------------------------------------------------------------------------*
1368  *	usbd_transfer_unsetup_sub - factored out code
1369  *------------------------------------------------------------------------*/
1370 static void
1371 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1372 {
1373 #if USB_HAVE_BUSDMA
1374 	struct usb_page_cache *pc;
1375 #endif
1376 
1377 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1378 
1379 	/* wait for any outstanding DMA operations */
1380 
1381 	if (needs_delay) {
1382 		usb_timeout_t temp;
1383 		temp = usbd_get_dma_delay(info->udev);
1384 		if (temp != 0) {
1385 			usb_pause_mtx(&info->bus->bus_mtx,
1386 			    USB_MS_TO_TICKS(temp));
1387 		}
1388 	}
1389 
1390 	/* make sure that our done messages are not queued anywhere */
1391 	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1392 
1393 	USB_BUS_UNLOCK(info->bus);
1394 
1395 #if USB_HAVE_BUSDMA
1396 	/* free DMA'able memory, if any */
1397 	pc = info->dma_page_cache_start;
1398 	while (pc != info->dma_page_cache_end) {
1399 		usb_pc_free_mem(pc);
1400 		pc++;
1401 	}
1402 
1403 	/* free DMA maps in all "xfer->frbuffers" */
1404 	pc = info->xfer_page_cache_start;
1405 	while (pc != info->xfer_page_cache_end) {
1406 		usb_pc_dmamap_destroy(pc);
1407 		pc++;
1408 	}
1409 
1410 	/* free all DMA tags */
1411 	usb_dma_tag_unsetup(&info->dma_parent_tag);
1412 #endif
1413 
1414 	cv_destroy(&info->cv_drain);
1415 
1416 	/*
1417 	 * free the "memory_base" last, hence the "info" structure is
1418 	 * contained within the "memory_base"!
1419 	 */
1420 	free(info->memory_base, M_USB);
1421 }
1422 
1423 /*------------------------------------------------------------------------*
1424  *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1425  *
1426  * NOTE: All USB transfers in progress will get called back passing
1427  * the error code "USB_ERR_CANCELLED" before this function
1428  * returns.
1429  *------------------------------------------------------------------------*/
1430 void
1431 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1432 {
1433 	struct usb_xfer *xfer;
1434 	struct usb_xfer_root *info;
1435 	uint8_t needs_delay = 0;
1436 
1437 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1438 	    "usbd_transfer_unsetup can sleep!");
1439 
1440 	while (n_setup--) {
1441 		xfer = pxfer[n_setup];
1442 
1443 		if (xfer == NULL)
1444 			continue;
1445 
1446 		info = xfer->xroot;
1447 
1448 		USB_XFER_LOCK(xfer);
1449 		USB_BUS_LOCK(info->bus);
1450 
1451 		/*
1452 		 * HINT: when you start/stop a transfer, it might be a
1453 		 * good idea to directly use the "pxfer[]" structure:
1454 		 *
1455 		 * usbd_transfer_start(sc->pxfer[0]);
1456 		 * usbd_transfer_stop(sc->pxfer[0]);
1457 		 *
1458 		 * That way, if your code has many parts that will not
1459 		 * stop running under the same lock, in other words
1460 		 * "xfer_mtx", the usbd_transfer_start and
1461 		 * usbd_transfer_stop functions will simply return
1462 		 * when they detect a NULL pointer argument.
1463 		 *
1464 		 * To avoid any races we clear the "pxfer[]" pointer
1465 		 * while holding the private mutex of the driver:
1466 		 */
1467 		pxfer[n_setup] = NULL;
1468 
1469 		USB_BUS_UNLOCK(info->bus);
1470 		USB_XFER_UNLOCK(xfer);
1471 
1472 		usbd_transfer_drain(xfer);
1473 
1474 #if USB_HAVE_BUSDMA
1475 		if (xfer->flags_int.bdma_enable)
1476 			needs_delay = 1;
1477 #endif
1478 		/*
1479 		 * NOTE: default endpoint does not have an
1480 		 * interface, even if endpoint->iface_index == 0
1481 		 */
1482 		USB_BUS_LOCK(info->bus);
1483 		xfer->endpoint->refcount_alloc--;
1484 		USB_BUS_UNLOCK(info->bus);
1485 
1486 		usb_callout_drain(&xfer->timeout_handle);
1487 
1488 		USB_BUS_LOCK(info->bus);
1489 
1490 		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1491 		    "reference count\n"));
1492 
1493 		info->setup_refcount--;
1494 
1495 		if (info->setup_refcount == 0) {
1496 			usbd_transfer_unsetup_sub(info,
1497 			    needs_delay);
1498 		} else {
1499 			USB_BUS_UNLOCK(info->bus);
1500 		}
1501 	}
1502 }
1503 
1504 /*------------------------------------------------------------------------*
1505  *	usbd_control_transfer_init - factored out code
1506  *
1507  * In USB Device Mode we have to wait for the SETUP packet which
1508  * containst the "struct usb_device_request" structure, before we can
1509  * transfer any data. In USB Host Mode we already have the SETUP
1510  * packet at the moment the USB transfer is started. This leads us to
1511  * having to setup the USB transfer at two different places in
1512  * time. This function just contains factored out control transfer
1513  * initialisation code, so that we don't duplicate the code.
1514  *------------------------------------------------------------------------*/
1515 static void
1516 usbd_control_transfer_init(struct usb_xfer *xfer)
1517 {
1518 	struct usb_device_request req;
1519 
1520 	/* copy out the USB request header */
1521 
1522 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1523 
1524 	/* setup remainder */
1525 
1526 	xfer->flags_int.control_rem = UGETW(req.wLength);
1527 
1528 	/* copy direction to endpoint variable */
1529 
1530 	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1531 	xfer->endpointno |=
1532 	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1533 }
1534 
1535 /*------------------------------------------------------------------------*
1536  *	usbd_control_transfer_did_data
1537  *
1538  * This function returns non-zero if a control endpoint has
1539  * transferred the first DATA packet after the SETUP packet.
1540  * Else it returns zero.
1541  *------------------------------------------------------------------------*/
1542 static uint8_t
1543 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1544 {
1545 	struct usb_device_request req;
1546 
1547 	/* SETUP packet is not yet sent */
1548 	if (xfer->flags_int.control_hdr != 0)
1549 		return (0);
1550 
1551 	/* copy out the USB request header */
1552 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1553 
1554 	/* compare remainder to the initial value */
1555 	return (xfer->flags_int.control_rem != UGETW(req.wLength));
1556 }
1557 
1558 /*------------------------------------------------------------------------*
1559  *	usbd_setup_ctrl_transfer
1560  *
1561  * This function handles initialisation of control transfers. Control
1562  * transfers are special in that regard that they can both transmit
1563  * and receive data.
1564  *
1565  * Return values:
1566  *    0: Success
1567  * Else: Failure
1568  *------------------------------------------------------------------------*/
1569 static int
1570 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1571 {
1572 	usb_frlength_t len;
1573 
1574 	/* Check for control endpoint stall */
1575 	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1576 		/* the control transfer is no longer active */
1577 		xfer->flags_int.control_stall = 1;
1578 		xfer->flags_int.control_act = 0;
1579 	} else {
1580 		/* don't stall control transfer by default */
1581 		xfer->flags_int.control_stall = 0;
1582 	}
1583 
1584 	/* Check for invalid number of frames */
1585 	if (xfer->nframes > 2) {
1586 		/*
1587 		 * If you need to split a control transfer, you
1588 		 * have to do one part at a time. Only with
1589 		 * non-control transfers you can do multiple
1590 		 * parts a time.
1591 		 */
1592 		DPRINTFN(0, "Too many frames: %u\n",
1593 		    (unsigned)xfer->nframes);
1594 		goto error;
1595 	}
1596 
1597 	/*
1598          * Check if there is a control
1599          * transfer in progress:
1600          */
1601 	if (xfer->flags_int.control_act) {
1602 		if (xfer->flags_int.control_hdr) {
1603 			/* clear send header flag */
1604 
1605 			xfer->flags_int.control_hdr = 0;
1606 
1607 			/* setup control transfer */
1608 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1609 				usbd_control_transfer_init(xfer);
1610 			}
1611 		}
1612 		/* get data length */
1613 
1614 		len = xfer->sumlen;
1615 
1616 	} else {
1617 		/* the size of the SETUP structure is hardcoded ! */
1618 
1619 		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1620 			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1621 			    xfer->frlengths[0], sizeof(struct
1622 			    usb_device_request));
1623 			goto error;
1624 		}
1625 		/* check USB mode */
1626 		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1627 			/* check number of frames */
1628 			if (xfer->nframes != 1) {
1629 				/*
1630 			         * We need to receive the setup
1631 			         * message first so that we know the
1632 			         * data direction!
1633 			         */
1634 				DPRINTF("Misconfigured transfer\n");
1635 				goto error;
1636 			}
1637 			/*
1638 			 * Set a dummy "control_rem" value.  This
1639 			 * variable will be overwritten later by a
1640 			 * call to "usbd_control_transfer_init()" !
1641 			 */
1642 			xfer->flags_int.control_rem = 0xFFFF;
1643 		} else {
1644 			/* setup "endpoint" and "control_rem" */
1645 
1646 			usbd_control_transfer_init(xfer);
1647 		}
1648 
1649 		/* set transfer-header flag */
1650 
1651 		xfer->flags_int.control_hdr = 1;
1652 
1653 		/* get data length */
1654 
1655 		len = (xfer->sumlen - sizeof(struct usb_device_request));
1656 	}
1657 
1658 	/* update did data flag */
1659 
1660 	xfer->flags_int.control_did_data =
1661 	    usbd_control_transfer_did_data(xfer);
1662 
1663 	/* check if there is a length mismatch */
1664 
1665 	if (len > xfer->flags_int.control_rem) {
1666 		DPRINTFN(0, "Length (%d) greater than "
1667 		    "remaining length (%d)\n", len,
1668 		    xfer->flags_int.control_rem);
1669 		goto error;
1670 	}
1671 	/* check if we are doing a short transfer */
1672 
1673 	if (xfer->flags.force_short_xfer) {
1674 		xfer->flags_int.control_rem = 0;
1675 	} else {
1676 		if ((len != xfer->max_data_length) &&
1677 		    (len != xfer->flags_int.control_rem) &&
1678 		    (xfer->nframes != 1)) {
1679 			DPRINTFN(0, "Short control transfer without "
1680 			    "force_short_xfer set\n");
1681 			goto error;
1682 		}
1683 		xfer->flags_int.control_rem -= len;
1684 	}
1685 
1686 	/* the status part is executed when "control_act" is 0 */
1687 
1688 	if ((xfer->flags_int.control_rem > 0) ||
1689 	    (xfer->flags.manual_status)) {
1690 		/* don't execute the STATUS stage yet */
1691 		xfer->flags_int.control_act = 1;
1692 
1693 		/* sanity check */
1694 		if ((!xfer->flags_int.control_hdr) &&
1695 		    (xfer->nframes == 1)) {
1696 			/*
1697 		         * This is not a valid operation!
1698 		         */
1699 			DPRINTFN(0, "Invalid parameter "
1700 			    "combination\n");
1701 			goto error;
1702 		}
1703 	} else {
1704 		/* time to execute the STATUS stage */
1705 		xfer->flags_int.control_act = 0;
1706 	}
1707 	return (0);			/* success */
1708 
1709 error:
1710 	return (1);			/* failure */
1711 }
1712 
1713 /*------------------------------------------------------------------------*
1714  *	usbd_transfer_submit - start USB hardware for the given transfer
1715  *
1716  * This function should only be called from the USB callback.
1717  *------------------------------------------------------------------------*/
1718 void
1719 usbd_transfer_submit(struct usb_xfer *xfer)
1720 {
1721 	struct usb_xfer_root *info;
1722 	struct usb_bus *bus;
1723 	usb_frcount_t x;
1724 
1725 	info = xfer->xroot;
1726 	bus = info->bus;
1727 
1728 	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1729 	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1730 	    "read" : "write");
1731 
1732 #ifdef USB_DEBUG
1733 	if (USB_DEBUG_VAR > 0) {
1734 		USB_BUS_LOCK(bus);
1735 
1736 		usb_dump_endpoint(xfer->endpoint);
1737 
1738 		USB_BUS_UNLOCK(bus);
1739 	}
1740 #endif
1741 
1742 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1743 	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1744 
1745 	/* Only open the USB transfer once! */
1746 	if (!xfer->flags_int.open) {
1747 		xfer->flags_int.open = 1;
1748 
1749 		DPRINTF("open\n");
1750 
1751 		USB_BUS_LOCK(bus);
1752 		(xfer->endpoint->methods->open) (xfer);
1753 		USB_BUS_UNLOCK(bus);
1754 	}
1755 	/* set "transferring" flag */
1756 	xfer->flags_int.transferring = 1;
1757 
1758 #if USB_HAVE_POWERD
1759 	/* increment power reference */
1760 	usbd_transfer_power_ref(xfer, 1);
1761 #endif
1762 	/*
1763 	 * Check if the transfer is waiting on a queue, most
1764 	 * frequently the "done_q":
1765 	 */
1766 	if (xfer->wait_queue) {
1767 		USB_BUS_LOCK(bus);
1768 		usbd_transfer_dequeue(xfer);
1769 		USB_BUS_UNLOCK(bus);
1770 	}
1771 	/* clear "did_dma_delay" flag */
1772 	xfer->flags_int.did_dma_delay = 0;
1773 
1774 	/* clear "did_close" flag */
1775 	xfer->flags_int.did_close = 0;
1776 
1777 #if USB_HAVE_BUSDMA
1778 	/* clear "bdma_setup" flag */
1779 	xfer->flags_int.bdma_setup = 0;
1780 #endif
1781 	/* by default we cannot cancel any USB transfer immediately */
1782 	xfer->flags_int.can_cancel_immed = 0;
1783 
1784 	/* clear lengths and frame counts by default */
1785 	xfer->sumlen = 0;
1786 	xfer->actlen = 0;
1787 	xfer->aframes = 0;
1788 
1789 	/* clear any previous errors */
1790 	xfer->error = 0;
1791 
1792 	/* Check if the device is still alive */
1793 	if (info->udev->state < USB_STATE_POWERED) {
1794 		USB_BUS_LOCK(bus);
1795 		/*
1796 		 * Must return cancelled error code else
1797 		 * device drivers can hang.
1798 		 */
1799 		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1800 		USB_BUS_UNLOCK(bus);
1801 		return;
1802 	}
1803 
1804 	/* sanity check */
1805 	if (xfer->nframes == 0) {
1806 		if (xfer->flags.stall_pipe) {
1807 			/*
1808 			 * Special case - want to stall without transferring
1809 			 * any data:
1810 			 */
1811 			DPRINTF("xfer=%p nframes=0: stall "
1812 			    "or clear stall!\n", xfer);
1813 			USB_BUS_LOCK(bus);
1814 			xfer->flags_int.can_cancel_immed = 1;
1815 			/* start the transfer */
1816 			usb_command_wrapper(&xfer->endpoint->
1817 			    endpoint_q[xfer->stream_id], xfer);
1818 			USB_BUS_UNLOCK(bus);
1819 			return;
1820 		}
1821 		USB_BUS_LOCK(bus);
1822 		usbd_transfer_done(xfer, USB_ERR_INVAL);
1823 		USB_BUS_UNLOCK(bus);
1824 		return;
1825 	}
1826 	/* compute some variables */
1827 
1828 	for (x = 0; x != xfer->nframes; x++) {
1829 		/* make a copy of the frlenghts[] */
1830 		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1831 		/* compute total transfer length */
1832 		xfer->sumlen += xfer->frlengths[x];
1833 		if (xfer->sumlen < xfer->frlengths[x]) {
1834 			/* length wrapped around */
1835 			USB_BUS_LOCK(bus);
1836 			usbd_transfer_done(xfer, USB_ERR_INVAL);
1837 			USB_BUS_UNLOCK(bus);
1838 			return;
1839 		}
1840 	}
1841 
1842 	/* clear some internal flags */
1843 
1844 	xfer->flags_int.short_xfer_ok = 0;
1845 	xfer->flags_int.short_frames_ok = 0;
1846 
1847 	/* check if this is a control transfer */
1848 
1849 	if (xfer->flags_int.control_xfr) {
1850 		if (usbd_setup_ctrl_transfer(xfer)) {
1851 			USB_BUS_LOCK(bus);
1852 			usbd_transfer_done(xfer, USB_ERR_STALLED);
1853 			USB_BUS_UNLOCK(bus);
1854 			return;
1855 		}
1856 	}
1857 	/*
1858 	 * Setup filtered version of some transfer flags,
1859 	 * in case of data read direction
1860 	 */
1861 	if (USB_GET_DATA_ISREAD(xfer)) {
1862 		if (xfer->flags.short_frames_ok) {
1863 			xfer->flags_int.short_xfer_ok = 1;
1864 			xfer->flags_int.short_frames_ok = 1;
1865 		} else if (xfer->flags.short_xfer_ok) {
1866 			xfer->flags_int.short_xfer_ok = 1;
1867 
1868 			/* check for control transfer */
1869 			if (xfer->flags_int.control_xfr) {
1870 				/*
1871 				 * 1) Control transfers do not support
1872 				 * reception of multiple short USB
1873 				 * frames in host mode and device side
1874 				 * mode, with exception of:
1875 				 *
1876 				 * 2) Due to sometimes buggy device
1877 				 * side firmware we need to do a
1878 				 * STATUS stage in case of short
1879 				 * control transfers in USB host mode.
1880 				 * The STATUS stage then becomes the
1881 				 * "alt_next" to the DATA stage.
1882 				 */
1883 				xfer->flags_int.short_frames_ok = 1;
1884 			}
1885 		}
1886 	}
1887 	/*
1888 	 * Check if BUS-DMA support is enabled and try to load virtual
1889 	 * buffers into DMA, if any:
1890 	 */
1891 #if USB_HAVE_BUSDMA
1892 	if (xfer->flags_int.bdma_enable) {
1893 		/* insert the USB transfer last in the BUS-DMA queue */
1894 		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1895 		return;
1896 	}
1897 #endif
1898 	/*
1899 	 * Enter the USB transfer into the Host Controller or
1900 	 * Device Controller schedule:
1901 	 */
1902 	usbd_pipe_enter(xfer);
1903 }
1904 
1905 /*------------------------------------------------------------------------*
1906  *	usbd_pipe_enter - factored out code
1907  *------------------------------------------------------------------------*/
1908 void
1909 usbd_pipe_enter(struct usb_xfer *xfer)
1910 {
1911 	struct usb_endpoint *ep;
1912 
1913 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1914 
1915 	USB_BUS_LOCK(xfer->xroot->bus);
1916 
1917 	ep = xfer->endpoint;
1918 
1919 	DPRINTF("enter\n");
1920 
1921 	/* the transfer can now be cancelled */
1922 	xfer->flags_int.can_cancel_immed = 1;
1923 
1924 	/* enter the transfer */
1925 	(ep->methods->enter) (xfer);
1926 
1927 	/* check for transfer error */
1928 	if (xfer->error) {
1929 		/* some error has happened */
1930 		usbd_transfer_done(xfer, 0);
1931 		USB_BUS_UNLOCK(xfer->xroot->bus);
1932 		return;
1933 	}
1934 
1935 	/* start the transfer */
1936 	usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1937 	USB_BUS_UNLOCK(xfer->xroot->bus);
1938 }
1939 
1940 /*------------------------------------------------------------------------*
1941  *	usbd_transfer_start - start an USB transfer
1942  *
1943  * NOTE: Calling this function more than one time will only
1944  *       result in a single transfer start, until the USB transfer
1945  *       completes.
1946  *------------------------------------------------------------------------*/
1947 void
1948 usbd_transfer_start(struct usb_xfer *xfer)
1949 {
1950 	if (xfer == NULL) {
1951 		/* transfer is gone */
1952 		return;
1953 	}
1954 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1955 
1956 	/* mark the USB transfer started */
1957 
1958 	if (!xfer->flags_int.started) {
1959 		/* lock the BUS lock to avoid races updating flags_int */
1960 		USB_BUS_LOCK(xfer->xroot->bus);
1961 		xfer->flags_int.started = 1;
1962 		USB_BUS_UNLOCK(xfer->xroot->bus);
1963 	}
1964 	/* check if the USB transfer callback is already transferring */
1965 
1966 	if (xfer->flags_int.transferring) {
1967 		return;
1968 	}
1969 	USB_BUS_LOCK(xfer->xroot->bus);
1970 	/* call the USB transfer callback */
1971 	usbd_callback_ss_done_defer(xfer);
1972 	USB_BUS_UNLOCK(xfer->xroot->bus);
1973 }
1974 
1975 /*------------------------------------------------------------------------*
1976  *	usbd_transfer_stop - stop an USB transfer
1977  *
1978  * NOTE: Calling this function more than one time will only
1979  *       result in a single transfer stop.
1980  * NOTE: When this function returns it is not safe to free nor
1981  *       reuse any DMA buffers. See "usbd_transfer_drain()".
1982  *------------------------------------------------------------------------*/
1983 void
1984 usbd_transfer_stop(struct usb_xfer *xfer)
1985 {
1986 	struct usb_endpoint *ep;
1987 
1988 	if (xfer == NULL) {
1989 		/* transfer is gone */
1990 		return;
1991 	}
1992 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1993 
1994 	/* check if the USB transfer was ever opened */
1995 
1996 	if (!xfer->flags_int.open) {
1997 		if (xfer->flags_int.started) {
1998 			/* nothing to do except clearing the "started" flag */
1999 			/* lock the BUS lock to avoid races updating flags_int */
2000 			USB_BUS_LOCK(xfer->xroot->bus);
2001 			xfer->flags_int.started = 0;
2002 			USB_BUS_UNLOCK(xfer->xroot->bus);
2003 		}
2004 		return;
2005 	}
2006 	/* try to stop the current USB transfer */
2007 
2008 	USB_BUS_LOCK(xfer->xroot->bus);
2009 	/* override any previous error */
2010 	xfer->error = USB_ERR_CANCELLED;
2011 
2012 	/*
2013 	 * Clear "open" and "started" when both private and USB lock
2014 	 * is locked so that we don't get a race updating "flags_int"
2015 	 */
2016 	xfer->flags_int.open = 0;
2017 	xfer->flags_int.started = 0;
2018 
2019 	/*
2020 	 * Check if we can cancel the USB transfer immediately.
2021 	 */
2022 	if (xfer->flags_int.transferring) {
2023 		if (xfer->flags_int.can_cancel_immed &&
2024 		    (!xfer->flags_int.did_close)) {
2025 			DPRINTF("close\n");
2026 			/*
2027 			 * The following will lead to an USB_ERR_CANCELLED
2028 			 * error code being passed to the USB callback.
2029 			 */
2030 			(xfer->endpoint->methods->close) (xfer);
2031 			/* only close once */
2032 			xfer->flags_int.did_close = 1;
2033 		} else {
2034 			/* need to wait for the next done callback */
2035 		}
2036 	} else {
2037 		DPRINTF("close\n");
2038 
2039 		/* close here and now */
2040 		(xfer->endpoint->methods->close) (xfer);
2041 
2042 		/*
2043 		 * Any additional DMA delay is done by
2044 		 * "usbd_transfer_unsetup()".
2045 		 */
2046 
2047 		/*
2048 		 * Special case. Check if we need to restart a blocked
2049 		 * endpoint.
2050 		 */
2051 		ep = xfer->endpoint;
2052 
2053 		/*
2054 		 * If the current USB transfer is completing we need
2055 		 * to start the next one:
2056 		 */
2057 		if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2058 			usb_command_wrapper(
2059 			    &ep->endpoint_q[xfer->stream_id], NULL);
2060 		}
2061 	}
2062 
2063 	USB_BUS_UNLOCK(xfer->xroot->bus);
2064 }
2065 
2066 /*------------------------------------------------------------------------*
2067  *	usbd_transfer_pending
2068  *
2069  * This function will check if an USB transfer is pending which is a
2070  * little bit complicated!
2071  * Return values:
2072  * 0: Not pending
2073  * 1: Pending: The USB transfer will receive a callback in the future.
2074  *------------------------------------------------------------------------*/
2075 uint8_t
2076 usbd_transfer_pending(struct usb_xfer *xfer)
2077 {
2078 	struct usb_xfer_root *info;
2079 	struct usb_xfer_queue *pq;
2080 
2081 	if (xfer == NULL) {
2082 		/* transfer is gone */
2083 		return (0);
2084 	}
2085 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2086 
2087 	if (xfer->flags_int.transferring) {
2088 		/* trivial case */
2089 		return (1);
2090 	}
2091 	USB_BUS_LOCK(xfer->xroot->bus);
2092 	if (xfer->wait_queue) {
2093 		/* we are waiting on a queue somewhere */
2094 		USB_BUS_UNLOCK(xfer->xroot->bus);
2095 		return (1);
2096 	}
2097 	info = xfer->xroot;
2098 	pq = &info->done_q;
2099 
2100 	if (pq->curr == xfer) {
2101 		/* we are currently scheduled for callback */
2102 		USB_BUS_UNLOCK(xfer->xroot->bus);
2103 		return (1);
2104 	}
2105 	/* we are not pending */
2106 	USB_BUS_UNLOCK(xfer->xroot->bus);
2107 	return (0);
2108 }
2109 
2110 /*------------------------------------------------------------------------*
2111  *	usbd_transfer_drain
2112  *
2113  * This function will stop the USB transfer and wait for any
2114  * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2115  * are loaded into DMA can safely be freed or reused after that this
2116  * function has returned.
2117  *------------------------------------------------------------------------*/
2118 void
2119 usbd_transfer_drain(struct usb_xfer *xfer)
2120 {
2121 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2122 	    "usbd_transfer_drain can sleep!");
2123 
2124 	if (xfer == NULL) {
2125 		/* transfer is gone */
2126 		return;
2127 	}
2128 	if (xfer->xroot->xfer_mtx != &Giant) {
2129 		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2130 	}
2131 	USB_XFER_LOCK(xfer);
2132 
2133 	usbd_transfer_stop(xfer);
2134 
2135 	while (usbd_transfer_pending(xfer) ||
2136 	    xfer->flags_int.doing_callback) {
2137 		/*
2138 		 * It is allowed that the callback can drop its
2139 		 * transfer mutex. In that case checking only
2140 		 * "usbd_transfer_pending()" is not enough to tell if
2141 		 * the USB transfer is fully drained. We also need to
2142 		 * check the internal "doing_callback" flag.
2143 		 */
2144 		xfer->flags_int.draining = 1;
2145 
2146 		/*
2147 		 * Wait until the current outstanding USB
2148 		 * transfer is complete !
2149 		 */
2150 		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2151 	}
2152 	USB_XFER_UNLOCK(xfer);
2153 }
2154 
2155 struct usb_page_cache *
2156 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2157 {
2158 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2159 
2160 	return (&xfer->frbuffers[frindex]);
2161 }
2162 
2163 void *
2164 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2165 {
2166 	struct usb_page_search page_info;
2167 
2168 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2169 
2170 	usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2171 	return (page_info.buffer);
2172 }
2173 
2174 /*------------------------------------------------------------------------*
2175  *	usbd_xfer_get_fps_shift
2176  *
2177  * The following function is only useful for isochronous transfers. It
2178  * returns how many times the frame execution rate has been shifted
2179  * down.
2180  *
2181  * Return value:
2182  * Success: 0..3
2183  * Failure: 0
2184  *------------------------------------------------------------------------*/
2185 uint8_t
2186 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2187 {
2188 	return (xfer->fps_shift);
2189 }
2190 
2191 usb_frlength_t
2192 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2193 {
2194 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2195 
2196 	return (xfer->frlengths[frindex]);
2197 }
2198 
2199 /*------------------------------------------------------------------------*
2200  *	usbd_xfer_set_frame_data
2201  *
2202  * This function sets the pointer of the buffer that should
2203  * loaded directly into DMA for the given USB frame. Passing "ptr"
2204  * equal to NULL while the corresponding "frlength" is greater
2205  * than zero gives undefined results!
2206  *------------------------------------------------------------------------*/
2207 void
2208 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2209     void *ptr, usb_frlength_t len)
2210 {
2211 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2212 
2213 	/* set virtual address to load and length */
2214 	xfer->frbuffers[frindex].buffer = ptr;
2215 	usbd_xfer_set_frame_len(xfer, frindex, len);
2216 }
2217 
2218 void
2219 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2220     void **ptr, int *len)
2221 {
2222 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2223 
2224 	if (ptr != NULL)
2225 		*ptr = xfer->frbuffers[frindex].buffer;
2226 	if (len != NULL)
2227 		*len = xfer->frlengths[frindex];
2228 }
2229 
2230 /*------------------------------------------------------------------------*
2231  *	usbd_xfer_old_frame_length
2232  *
2233  * This function returns the framelength of the given frame at the
2234  * time the transfer was submitted. This function can be used to
2235  * compute the starting data pointer of the next isochronous frame
2236  * when an isochronous transfer has completed.
2237  *------------------------------------------------------------------------*/
2238 usb_frlength_t
2239 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2240 {
2241 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2242 
2243 	return (xfer->frlengths[frindex + xfer->max_frame_count]);
2244 }
2245 
2246 void
2247 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2248     int *nframes)
2249 {
2250 	if (actlen != NULL)
2251 		*actlen = xfer->actlen;
2252 	if (sumlen != NULL)
2253 		*sumlen = xfer->sumlen;
2254 	if (aframes != NULL)
2255 		*aframes = xfer->aframes;
2256 	if (nframes != NULL)
2257 		*nframes = xfer->nframes;
2258 }
2259 
2260 /*------------------------------------------------------------------------*
2261  *	usbd_xfer_set_frame_offset
2262  *
2263  * This function sets the frame data buffer offset relative to the beginning
2264  * of the USB DMA buffer allocated for this USB transfer.
2265  *------------------------------------------------------------------------*/
2266 void
2267 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2268     usb_frcount_t frindex)
2269 {
2270 	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2271 	    "when the USB buffer is external\n"));
2272 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2273 
2274 	/* set virtual address to load */
2275 	xfer->frbuffers[frindex].buffer =
2276 	    USB_ADD_BYTES(xfer->local_buffer, offset);
2277 }
2278 
2279 void
2280 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2281 {
2282 	xfer->interval = i;
2283 }
2284 
2285 void
2286 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2287 {
2288 	xfer->timeout = t;
2289 }
2290 
2291 void
2292 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2293 {
2294 	xfer->nframes = n;
2295 }
2296 
2297 usb_frcount_t
2298 usbd_xfer_max_frames(struct usb_xfer *xfer)
2299 {
2300 	return (xfer->max_frame_count);
2301 }
2302 
2303 usb_frlength_t
2304 usbd_xfer_max_len(struct usb_xfer *xfer)
2305 {
2306 	return (xfer->max_data_length);
2307 }
2308 
2309 usb_frlength_t
2310 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2311 {
2312 	return (xfer->max_frame_size);
2313 }
2314 
2315 void
2316 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2317     usb_frlength_t len)
2318 {
2319 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2320 
2321 	xfer->frlengths[frindex] = len;
2322 }
2323 
2324 /*------------------------------------------------------------------------*
2325  *	usb_callback_proc - factored out code
2326  *
2327  * This function performs USB callbacks.
2328  *------------------------------------------------------------------------*/
2329 static void
2330 usb_callback_proc(struct usb_proc_msg *_pm)
2331 {
2332 	struct usb_done_msg *pm = (void *)_pm;
2333 	struct usb_xfer_root *info = pm->xroot;
2334 
2335 	/* Change locking order */
2336 	USB_BUS_UNLOCK(info->bus);
2337 
2338 	/*
2339 	 * We exploit the fact that the mutex is the same for all
2340 	 * callbacks that will be called from this thread:
2341 	 */
2342 	USB_MTX_LOCK(info->xfer_mtx);
2343 	USB_BUS_LOCK(info->bus);
2344 
2345 	/* Continue where we lost track */
2346 	usb_command_wrapper(&info->done_q,
2347 	    info->done_q.curr);
2348 
2349 	USB_MTX_UNLOCK(info->xfer_mtx);
2350 }
2351 
2352 /*------------------------------------------------------------------------*
2353  *	usbd_callback_ss_done_defer
2354  *
2355  * This function will defer the start, stop and done callback to the
2356  * correct thread.
2357  *------------------------------------------------------------------------*/
2358 static void
2359 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2360 {
2361 	struct usb_xfer_root *info = xfer->xroot;
2362 	struct usb_xfer_queue *pq = &info->done_q;
2363 
2364 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2365 
2366 	if (pq->curr != xfer) {
2367 		usbd_transfer_enqueue(pq, xfer);
2368 	}
2369 	if (!pq->recurse_1) {
2370 		/*
2371 	         * We have to postpone the callback due to the fact we
2372 	         * will have a Lock Order Reversal, LOR, if we try to
2373 	         * proceed !
2374 	         */
2375 		(void) usb_proc_msignal(info->done_p,
2376 		    &info->done_m[0], &info->done_m[1]);
2377 	} else {
2378 		/* clear second recurse flag */
2379 		pq->recurse_2 = 0;
2380 	}
2381 	return;
2382 
2383 }
2384 
2385 /*------------------------------------------------------------------------*
2386  *	usbd_callback_wrapper
2387  *
2388  * This is a wrapper for USB callbacks. This wrapper does some
2389  * auto-magic things like figuring out if we can call the callback
2390  * directly from the current context or if we need to wakeup the
2391  * interrupt process.
2392  *------------------------------------------------------------------------*/
2393 static void
2394 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2395 {
2396 	struct usb_xfer *xfer = pq->curr;
2397 	struct usb_xfer_root *info = xfer->xroot;
2398 
2399 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2400 	if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2401 	    USB_IN_POLLING_MODE_FUNC() == 0) {
2402 		/*
2403 	       	 * Cases that end up here:
2404 		 *
2405 		 * 5) HW interrupt done callback or other source.
2406 		 * 6) HW completed transfer during callback
2407 		 */
2408 		DPRINTFN(3, "case 5 and 6\n");
2409 
2410 		/*
2411 	         * We have to postpone the callback due to the fact we
2412 	         * will have a Lock Order Reversal, LOR, if we try to
2413 	         * proceed!
2414 		 *
2415 		 * Postponing the callback also ensures that other USB
2416 		 * transfer queues get a chance.
2417 	         */
2418 		(void) usb_proc_msignal(info->done_p,
2419 		    &info->done_m[0], &info->done_m[1]);
2420 		return;
2421 	}
2422 	/*
2423 	 * Cases that end up here:
2424 	 *
2425 	 * 1) We are starting a transfer
2426 	 * 2) We are prematurely calling back a transfer
2427 	 * 3) We are stopping a transfer
2428 	 * 4) We are doing an ordinary callback
2429 	 */
2430 	DPRINTFN(3, "case 1-4\n");
2431 	/* get next USB transfer in the queue */
2432 	info->done_q.curr = NULL;
2433 
2434 	/* set flag in case of drain */
2435 	xfer->flags_int.doing_callback = 1;
2436 
2437 	USB_BUS_UNLOCK(info->bus);
2438 	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2439 
2440 	/* set correct USB state for callback */
2441 	if (!xfer->flags_int.transferring) {
2442 		xfer->usb_state = USB_ST_SETUP;
2443 		if (!xfer->flags_int.started) {
2444 			/* we got stopped before we even got started */
2445 			USB_BUS_LOCK(info->bus);
2446 			goto done;
2447 		}
2448 	} else {
2449 		if (usbd_callback_wrapper_sub(xfer)) {
2450 			/* the callback has been deferred */
2451 			USB_BUS_LOCK(info->bus);
2452 			goto done;
2453 		}
2454 #if USB_HAVE_POWERD
2455 		/* decrement power reference */
2456 		usbd_transfer_power_ref(xfer, -1);
2457 #endif
2458 		xfer->flags_int.transferring = 0;
2459 
2460 		if (xfer->error) {
2461 			xfer->usb_state = USB_ST_ERROR;
2462 		} else {
2463 			/* set transferred state */
2464 			xfer->usb_state = USB_ST_TRANSFERRED;
2465 #if USB_HAVE_BUSDMA
2466 			/* sync DMA memory, if any */
2467 			if (xfer->flags_int.bdma_enable &&
2468 			    (!xfer->flags_int.bdma_no_post_sync)) {
2469 				usb_bdma_post_sync(xfer);
2470 			}
2471 #endif
2472 		}
2473 	}
2474 
2475 #if USB_HAVE_PF
2476 	if (xfer->usb_state != USB_ST_SETUP) {
2477 		USB_BUS_LOCK(info->bus);
2478 		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2479 		USB_BUS_UNLOCK(info->bus);
2480 	}
2481 #endif
2482 	/* call processing routine */
2483 	(xfer->callback) (xfer, xfer->error);
2484 
2485 	/* pickup the USB mutex again */
2486 	USB_BUS_LOCK(info->bus);
2487 
2488 	/*
2489 	 * Check if we got started after that we got cancelled, but
2490 	 * before we managed to do the callback.
2491 	 */
2492 	if ((!xfer->flags_int.open) &&
2493 	    (xfer->flags_int.started) &&
2494 	    (xfer->usb_state == USB_ST_ERROR)) {
2495 		/* clear flag in case of drain */
2496 		xfer->flags_int.doing_callback = 0;
2497 		/* try to loop, but not recursivly */
2498 		usb_command_wrapper(&info->done_q, xfer);
2499 		return;
2500 	}
2501 
2502 done:
2503 	/* clear flag in case of drain */
2504 	xfer->flags_int.doing_callback = 0;
2505 
2506 	/*
2507 	 * Check if we are draining.
2508 	 */
2509 	if (xfer->flags_int.draining &&
2510 	    (!xfer->flags_int.transferring)) {
2511 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2512 		xfer->flags_int.draining = 0;
2513 		cv_broadcast(&info->cv_drain);
2514 	}
2515 
2516 	/* do the next callback, if any */
2517 	usb_command_wrapper(&info->done_q,
2518 	    info->done_q.curr);
2519 }
2520 
2521 /*------------------------------------------------------------------------*
2522  *	usb_dma_delay_done_cb
2523  *
2524  * This function is called when the DMA delay has been exectuded, and
2525  * will make sure that the callback is called to complete the USB
2526  * transfer. This code path is usually only used when there is an USB
2527  * error like USB_ERR_CANCELLED.
2528  *------------------------------------------------------------------------*/
2529 void
2530 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2531 {
2532 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2533 
2534 	DPRINTFN(3, "Completed %p\n", xfer);
2535 
2536 	/* queue callback for execution, again */
2537 	usbd_transfer_done(xfer, 0);
2538 }
2539 
2540 /*------------------------------------------------------------------------*
2541  *	usbd_transfer_dequeue
2542  *
2543  *  - This function is used to remove an USB transfer from a USB
2544  *  transfer queue.
2545  *
2546  *  - This function can be called multiple times in a row.
2547  *------------------------------------------------------------------------*/
2548 void
2549 usbd_transfer_dequeue(struct usb_xfer *xfer)
2550 {
2551 	struct usb_xfer_queue *pq;
2552 
2553 	pq = xfer->wait_queue;
2554 	if (pq) {
2555 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2556 		xfer->wait_queue = NULL;
2557 	}
2558 }
2559 
2560 /*------------------------------------------------------------------------*
2561  *	usbd_transfer_enqueue
2562  *
2563  *  - This function is used to insert an USB transfer into a USB *
2564  *  transfer queue.
2565  *
2566  *  - This function can be called multiple times in a row.
2567  *------------------------------------------------------------------------*/
2568 void
2569 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2570 {
2571 	/*
2572 	 * Insert the USB transfer into the queue, if it is not
2573 	 * already on a USB transfer queue:
2574 	 */
2575 	if (xfer->wait_queue == NULL) {
2576 		xfer->wait_queue = pq;
2577 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2578 	}
2579 }
2580 
2581 /*------------------------------------------------------------------------*
2582  *	usbd_transfer_done
2583  *
2584  *  - This function is used to remove an USB transfer from the busdma,
2585  *  pipe or interrupt queue.
2586  *
2587  *  - This function is used to queue the USB transfer on the done
2588  *  queue.
2589  *
2590  *  - This function is used to stop any USB transfer timeouts.
2591  *------------------------------------------------------------------------*/
2592 void
2593 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2594 {
2595 	struct usb_xfer_root *info = xfer->xroot;
2596 
2597 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2598 
2599 	DPRINTF("err=%s\n", usbd_errstr(error));
2600 
2601 	/*
2602 	 * If we are not transferring then just return.
2603 	 * This can happen during transfer cancel.
2604 	 */
2605 	if (!xfer->flags_int.transferring) {
2606 		DPRINTF("not transferring\n");
2607 		/* end of control transfer, if any */
2608 		xfer->flags_int.control_act = 0;
2609 		return;
2610 	}
2611 	/* only set transfer error, if not already set */
2612 	if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2613 		xfer->error = error;
2614 
2615 	/* stop any callouts */
2616 	usb_callout_stop(&xfer->timeout_handle);
2617 
2618 	/*
2619 	 * If we are waiting on a queue, just remove the USB transfer
2620 	 * from the queue, if any. We should have the required locks
2621 	 * locked to do the remove when this function is called.
2622 	 */
2623 	usbd_transfer_dequeue(xfer);
2624 
2625 #if USB_HAVE_BUSDMA
2626 	if (mtx_owned(info->xfer_mtx)) {
2627 		struct usb_xfer_queue *pq;
2628 
2629 		/*
2630 		 * If the private USB lock is not locked, then we assume
2631 		 * that the BUS-DMA load stage has been passed:
2632 		 */
2633 		pq = &info->dma_q;
2634 
2635 		if (pq->curr == xfer) {
2636 			/* start the next BUS-DMA load, if any */
2637 			usb_command_wrapper(pq, NULL);
2638 		}
2639 	}
2640 #endif
2641 	/* keep some statistics */
2642 	if (xfer->error == USB_ERR_CANCELLED) {
2643 		info->udev->stats_cancelled.uds_requests
2644 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2645 	} else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2646 		info->udev->stats_err.uds_requests
2647 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2648 	} else {
2649 		info->udev->stats_ok.uds_requests
2650 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2651 	}
2652 
2653 	/* call the USB transfer callback */
2654 	usbd_callback_ss_done_defer(xfer);
2655 }
2656 
2657 /*------------------------------------------------------------------------*
2658  *	usbd_transfer_start_cb
2659  *
2660  * This function is called to start the USB transfer when
2661  * "xfer->interval" is greater than zero, and and the endpoint type is
2662  * BULK or CONTROL.
2663  *------------------------------------------------------------------------*/
2664 static void
2665 usbd_transfer_start_cb(void *arg)
2666 {
2667 	struct usb_xfer *xfer = arg;
2668 	struct usb_endpoint *ep = xfer->endpoint;
2669 
2670 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2671 
2672 	DPRINTF("start\n");
2673 
2674 #if USB_HAVE_PF
2675 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2676 #endif
2677 
2678 	/* the transfer can now be cancelled */
2679 	xfer->flags_int.can_cancel_immed = 1;
2680 
2681 	/* start USB transfer, if no error */
2682 	if (xfer->error == 0)
2683 		(ep->methods->start) (xfer);
2684 
2685 	/* check for transfer error */
2686 	if (xfer->error) {
2687 		/* some error has happened */
2688 		usbd_transfer_done(xfer, 0);
2689 	}
2690 }
2691 
2692 /*------------------------------------------------------------------------*
2693  *	usbd_xfer_set_zlp
2694  *
2695  * This function sets the USB transfers ZLP flag.
2696  *------------------------------------------------------------------------*/
2697 void
2698 usbd_xfer_set_zlp(struct usb_xfer *xfer)
2699 {
2700 	if (xfer == NULL) {
2701 		/* tearing down */
2702 		return;
2703 	}
2704 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2705 
2706 	/* avoid any races by locking the USB mutex */
2707 	USB_BUS_LOCK(xfer->xroot->bus);
2708 	xfer->flags.send_zlp = 1;
2709 	USB_BUS_UNLOCK(xfer->xroot->bus);
2710 }
2711 
2712 /*------------------------------------------------------------------------*
2713  *	usbd_xfer_get_and_clr_zlp
2714  *
2715  * This function gets and clears the USB transfers ZLP flag and
2716  * queues a zero-length USB transfer if the flag was set.
2717  *------------------------------------------------------------------------*/
2718 uint8_t
2719 usbd_xfer_get_and_clr_zlp(struct usb_xfer *xfer)
2720 {
2721 	uint8_t retval;
2722 
2723 	if (xfer == NULL) {
2724 		/* tearing down */
2725 		return (0);
2726 	}
2727 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2728 
2729 	retval = xfer->flags.send_zlp;
2730 
2731 	if (retval != 0) {
2732 		DPRINTFN(1, "Sending zero-length packet.\n");
2733 
2734 		/* avoid any races by locking the USB mutex */
2735 		USB_BUS_LOCK(xfer->xroot->bus);
2736 		xfer->flags.send_zlp = 0;
2737 		USB_BUS_UNLOCK(xfer->xroot->bus);
2738 
2739 		/* queue up a zero-length packet */
2740 		usbd_xfer_set_frame_len(xfer, 0, 0);
2741 		usbd_xfer_set_frames(xfer, 1);
2742 		usbd_transfer_submit(xfer);
2743 	}
2744 	return (retval);
2745 }
2746 
2747 /*------------------------------------------------------------------------*
2748  *	usbd_xfer_set_stall
2749  *
2750  * This function is used to set the stall flag outside the
2751  * callback. This function is NULL safe.
2752  *------------------------------------------------------------------------*/
2753 void
2754 usbd_xfer_set_stall(struct usb_xfer *xfer)
2755 {
2756 	if (xfer == NULL) {
2757 		/* tearing down */
2758 		return;
2759 	}
2760 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2761 
2762 	/* avoid any races by locking the USB mutex */
2763 	USB_BUS_LOCK(xfer->xroot->bus);
2764 	xfer->flags.stall_pipe = 1;
2765 	USB_BUS_UNLOCK(xfer->xroot->bus);
2766 }
2767 
2768 int
2769 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2770 {
2771 	return (xfer->endpoint->is_stalled);
2772 }
2773 
2774 /*------------------------------------------------------------------------*
2775  *	usbd_transfer_clear_stall
2776  *
2777  * This function is used to clear the stall flag outside the
2778  * callback. This function is NULL safe.
2779  *------------------------------------------------------------------------*/
2780 void
2781 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2782 {
2783 	if (xfer == NULL) {
2784 		/* tearing down */
2785 		return;
2786 	}
2787 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2788 
2789 	/* avoid any races by locking the USB mutex */
2790 	USB_BUS_LOCK(xfer->xroot->bus);
2791 	xfer->flags.stall_pipe = 0;
2792 	USB_BUS_UNLOCK(xfer->xroot->bus);
2793 }
2794 
2795 /*------------------------------------------------------------------------*
2796  *	usbd_pipe_start
2797  *
2798  * This function is used to add an USB transfer to the pipe transfer list.
2799  *------------------------------------------------------------------------*/
2800 void
2801 usbd_pipe_start(struct usb_xfer_queue *pq)
2802 {
2803 	struct usb_endpoint *ep;
2804 	struct usb_xfer *xfer;
2805 	uint8_t type;
2806 
2807 	xfer = pq->curr;
2808 	ep = xfer->endpoint;
2809 
2810 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2811 
2812 	/*
2813 	 * If the endpoint is already stalled we do nothing !
2814 	 */
2815 	if (ep->is_stalled) {
2816 		return;
2817 	}
2818 	/*
2819 	 * Check if we are supposed to stall the endpoint:
2820 	 */
2821 	if (xfer->flags.stall_pipe) {
2822 		struct usb_device *udev;
2823 		struct usb_xfer_root *info;
2824 
2825 		/* clear stall command */
2826 		xfer->flags.stall_pipe = 0;
2827 
2828 		/* get pointer to USB device */
2829 		info = xfer->xroot;
2830 		udev = info->udev;
2831 
2832 		/*
2833 		 * Only stall BULK and INTERRUPT endpoints.
2834 		 */
2835 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2836 		if ((type == UE_BULK) ||
2837 		    (type == UE_INTERRUPT)) {
2838 			uint8_t did_stall;
2839 
2840 			did_stall = 1;
2841 
2842 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2843 				(udev->bus->methods->set_stall) (
2844 				    udev, ep, &did_stall);
2845 			} else if (udev->ctrl_xfer[1]) {
2846 				info = udev->ctrl_xfer[1]->xroot;
2847 				usb_proc_msignal(
2848 				    USB_BUS_CS_PROC(info->bus),
2849 				    &udev->cs_msg[0], &udev->cs_msg[1]);
2850 			} else {
2851 				/* should not happen */
2852 				DPRINTFN(0, "No stall handler\n");
2853 			}
2854 			/*
2855 			 * Check if we should stall. Some USB hardware
2856 			 * handles set- and clear-stall in hardware.
2857 			 */
2858 			if (did_stall) {
2859 				/*
2860 				 * The transfer will be continued when
2861 				 * the clear-stall control endpoint
2862 				 * message is received.
2863 				 */
2864 				ep->is_stalled = 1;
2865 				return;
2866 			}
2867 		} else if (type == UE_ISOCHRONOUS) {
2868 			/*
2869 			 * Make sure any FIFO overflow or other FIFO
2870 			 * error conditions go away by resetting the
2871 			 * endpoint FIFO through the clear stall
2872 			 * method.
2873 			 */
2874 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2875 				(udev->bus->methods->clear_stall) (udev, ep);
2876 			}
2877 		}
2878 	}
2879 	/* Set or clear stall complete - special case */
2880 	if (xfer->nframes == 0) {
2881 		/* we are complete */
2882 		xfer->aframes = 0;
2883 		usbd_transfer_done(xfer, 0);
2884 		return;
2885 	}
2886 	/*
2887 	 * Handled cases:
2888 	 *
2889 	 * 1) Start the first transfer queued.
2890 	 *
2891 	 * 2) Re-start the current USB transfer.
2892 	 */
2893 	/*
2894 	 * Check if there should be any
2895 	 * pre transfer start delay:
2896 	 */
2897 	if (xfer->interval > 0) {
2898 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2899 		if ((type == UE_BULK) ||
2900 		    (type == UE_CONTROL)) {
2901 			usbd_transfer_timeout_ms(xfer,
2902 			    &usbd_transfer_start_cb,
2903 			    xfer->interval);
2904 			return;
2905 		}
2906 	}
2907 	DPRINTF("start\n");
2908 
2909 #if USB_HAVE_PF
2910 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2911 #endif
2912 	/* the transfer can now be cancelled */
2913 	xfer->flags_int.can_cancel_immed = 1;
2914 
2915 	/* start USB transfer, if no error */
2916 	if (xfer->error == 0)
2917 		(ep->methods->start) (xfer);
2918 
2919 	/* check for transfer error */
2920 	if (xfer->error) {
2921 		/* some error has happened */
2922 		usbd_transfer_done(xfer, 0);
2923 	}
2924 }
2925 
2926 /*------------------------------------------------------------------------*
2927  *	usbd_transfer_timeout_ms
2928  *
2929  * This function is used to setup a timeout on the given USB
2930  * transfer. If the timeout has been deferred the callback given by
2931  * "cb" will get called after "ms" milliseconds.
2932  *------------------------------------------------------------------------*/
2933 void
2934 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2935     void (*cb) (void *arg), usb_timeout_t ms)
2936 {
2937 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2938 
2939 	/* defer delay */
2940 	usb_callout_reset(&xfer->timeout_handle,
2941 	    USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2942 }
2943 
2944 /*------------------------------------------------------------------------*
2945  *	usbd_callback_wrapper_sub
2946  *
2947  *  - This function will update variables in an USB transfer after
2948  *  that the USB transfer is complete.
2949  *
2950  *  - This function is used to start the next USB transfer on the
2951  *  ep transfer queue, if any.
2952  *
2953  * NOTE: In some special cases the USB transfer will not be removed from
2954  * the pipe queue, but remain first. To enforce USB transfer removal call
2955  * this function passing the error code "USB_ERR_CANCELLED".
2956  *
2957  * Return values:
2958  * 0: Success.
2959  * Else: The callback has been deferred.
2960  *------------------------------------------------------------------------*/
2961 static uint8_t
2962 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2963 {
2964 	struct usb_endpoint *ep;
2965 	struct usb_bus *bus;
2966 	usb_frcount_t x;
2967 
2968 	bus = xfer->xroot->bus;
2969 
2970 	if ((!xfer->flags_int.open) &&
2971 	    (!xfer->flags_int.did_close)) {
2972 		DPRINTF("close\n");
2973 		USB_BUS_LOCK(bus);
2974 		(xfer->endpoint->methods->close) (xfer);
2975 		USB_BUS_UNLOCK(bus);
2976 		/* only close once */
2977 		xfer->flags_int.did_close = 1;
2978 		return (1);		/* wait for new callback */
2979 	}
2980 	/*
2981 	 * If we have a non-hardware induced error we
2982 	 * need to do the DMA delay!
2983 	 */
2984 	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2985 	    (xfer->error == USB_ERR_CANCELLED ||
2986 	    xfer->error == USB_ERR_TIMEOUT ||
2987 	    bus->methods->start_dma_delay != NULL)) {
2988 		usb_timeout_t temp;
2989 
2990 		/* only delay once */
2991 		xfer->flags_int.did_dma_delay = 1;
2992 
2993 		/* we can not cancel this delay */
2994 		xfer->flags_int.can_cancel_immed = 0;
2995 
2996 		temp = usbd_get_dma_delay(xfer->xroot->udev);
2997 
2998 		DPRINTFN(3, "DMA delay, %u ms, "
2999 		    "on %p\n", temp, xfer);
3000 
3001 		if (temp != 0) {
3002 			USB_BUS_LOCK(bus);
3003 			/*
3004 			 * Some hardware solutions have dedicated
3005 			 * events when it is safe to free DMA'ed
3006 			 * memory. For the other hardware platforms we
3007 			 * use a static delay.
3008 			 */
3009 			if (bus->methods->start_dma_delay != NULL) {
3010 				(bus->methods->start_dma_delay) (xfer);
3011 			} else {
3012 				usbd_transfer_timeout_ms(xfer,
3013 				    (void (*)(void *))&usb_dma_delay_done_cb,
3014 				    temp);
3015 			}
3016 			USB_BUS_UNLOCK(bus);
3017 			return (1);	/* wait for new callback */
3018 		}
3019 	}
3020 	/* check actual number of frames */
3021 	if (xfer->aframes > xfer->nframes) {
3022 		if (xfer->error == 0) {
3023 			panic("%s: actual number of frames, %d, is "
3024 			    "greater than initial number of frames, %d\n",
3025 			    __FUNCTION__, xfer->aframes, xfer->nframes);
3026 		} else {
3027 			/* just set some valid value */
3028 			xfer->aframes = xfer->nframes;
3029 		}
3030 	}
3031 	/* compute actual length */
3032 	xfer->actlen = 0;
3033 
3034 	for (x = 0; x != xfer->aframes; x++) {
3035 		xfer->actlen += xfer->frlengths[x];
3036 	}
3037 
3038 	/*
3039 	 * Frames that were not transferred get zero actual length in
3040 	 * case the USB device driver does not check the actual number
3041 	 * of frames transferred, "xfer->aframes":
3042 	 */
3043 	for (; x < xfer->nframes; x++) {
3044 		usbd_xfer_set_frame_len(xfer, x, 0);
3045 	}
3046 
3047 	/* check actual length */
3048 	if (xfer->actlen > xfer->sumlen) {
3049 		if (xfer->error == 0) {
3050 			panic("%s: actual length, %d, is greater than "
3051 			    "initial length, %d\n",
3052 			    __FUNCTION__, xfer->actlen, xfer->sumlen);
3053 		} else {
3054 			/* just set some valid value */
3055 			xfer->actlen = xfer->sumlen;
3056 		}
3057 	}
3058 	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
3059 	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
3060 	    xfer->aframes, xfer->nframes);
3061 
3062 	if (xfer->error) {
3063 		/* end of control transfer, if any */
3064 		xfer->flags_int.control_act = 0;
3065 
3066 #if USB_HAVE_TT_SUPPORT
3067 		switch (xfer->error) {
3068 		case USB_ERR_NORMAL_COMPLETION:
3069 		case USB_ERR_SHORT_XFER:
3070 		case USB_ERR_STALLED:
3071 		case USB_ERR_CANCELLED:
3072 			/* nothing to do */
3073 			break;
3074 		default:
3075 			/* try to reset the TT, if any */
3076 			USB_BUS_LOCK(bus);
3077 			uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
3078 			USB_BUS_UNLOCK(bus);
3079 			break;
3080 		}
3081 #endif
3082 		/* check if we should block the execution queue */
3083 		if ((xfer->error != USB_ERR_CANCELLED) &&
3084 		    (xfer->flags.pipe_bof)) {
3085 			DPRINTFN(2, "xfer=%p: Block On Failure "
3086 			    "on endpoint=%p\n", xfer, xfer->endpoint);
3087 			goto done;
3088 		}
3089 	} else {
3090 		/* check for short transfers */
3091 		if (xfer->actlen < xfer->sumlen) {
3092 			/* end of control transfer, if any */
3093 			xfer->flags_int.control_act = 0;
3094 
3095 			if (!xfer->flags_int.short_xfer_ok) {
3096 				xfer->error = USB_ERR_SHORT_XFER;
3097 				if (xfer->flags.pipe_bof) {
3098 					DPRINTFN(2, "xfer=%p: Block On Failure on "
3099 					    "Short Transfer on endpoint %p.\n",
3100 					    xfer, xfer->endpoint);
3101 					goto done;
3102 				}
3103 			}
3104 		} else {
3105 			/*
3106 			 * Check if we are in the middle of a
3107 			 * control transfer:
3108 			 */
3109 			if (xfer->flags_int.control_act) {
3110 				DPRINTFN(5, "xfer=%p: Control transfer "
3111 				    "active on endpoint=%p\n", xfer, xfer->endpoint);
3112 				goto done;
3113 			}
3114 		}
3115 	}
3116 
3117 	ep = xfer->endpoint;
3118 
3119 	/*
3120 	 * If the current USB transfer is completing we need to start the
3121 	 * next one:
3122 	 */
3123 	USB_BUS_LOCK(bus);
3124 	if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
3125 		usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
3126 
3127 		if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
3128 		    TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3129 			/* there is another USB transfer waiting */
3130 		} else {
3131 			/* this is the last USB transfer */
3132 			/* clear isochronous sync flag */
3133 			xfer->endpoint->is_synced = 0;
3134 		}
3135 	}
3136 	USB_BUS_UNLOCK(bus);
3137 done:
3138 	return (0);
3139 }
3140 
3141 /*------------------------------------------------------------------------*
3142  *	usb_command_wrapper
3143  *
3144  * This function is used to execute commands non-recursivly on an USB
3145  * transfer.
3146  *------------------------------------------------------------------------*/
3147 void
3148 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3149 {
3150 	if (xfer) {
3151 		/*
3152 		 * If the transfer is not already processing,
3153 		 * queue it!
3154 		 */
3155 		if (pq->curr != xfer) {
3156 			usbd_transfer_enqueue(pq, xfer);
3157 			if (pq->curr != NULL) {
3158 				/* something is already processing */
3159 				DPRINTFN(6, "busy %p\n", pq->curr);
3160 				return;
3161 			}
3162 		}
3163 	} else {
3164 		/* Get next element in queue */
3165 		pq->curr = NULL;
3166 	}
3167 
3168 	if (!pq->recurse_1) {
3169 		/* clear third recurse flag */
3170 		pq->recurse_3 = 0;
3171 
3172 		do {
3173 			/* set two first recurse flags */
3174 			pq->recurse_1 = 1;
3175 			pq->recurse_2 = 1;
3176 
3177 			if (pq->curr == NULL) {
3178 				xfer = TAILQ_FIRST(&pq->head);
3179 				if (xfer) {
3180 					TAILQ_REMOVE(&pq->head, xfer,
3181 					    wait_entry);
3182 					xfer->wait_queue = NULL;
3183 					pq->curr = xfer;
3184 				} else {
3185 					break;
3186 				}
3187 			}
3188 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3189 			(pq->command) (pq);
3190 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3191 
3192 			/*
3193 			 * Set third recurse flag to indicate
3194 			 * recursion happened:
3195 			 */
3196 			pq->recurse_3 = 1;
3197 
3198 		} while (!pq->recurse_2);
3199 
3200 		/* clear first recurse flag */
3201 		pq->recurse_1 = 0;
3202 
3203 	} else {
3204 		/* clear second recurse flag */
3205 		pq->recurse_2 = 0;
3206 	}
3207 }
3208 
3209 /*------------------------------------------------------------------------*
3210  *	usbd_ctrl_transfer_setup
3211  *
3212  * This function is used to setup the default USB control endpoint
3213  * transfer.
3214  *------------------------------------------------------------------------*/
3215 void
3216 usbd_ctrl_transfer_setup(struct usb_device *udev)
3217 {
3218 	struct usb_xfer *xfer;
3219 	uint8_t no_resetup;
3220 	uint8_t iface_index;
3221 
3222 	/* check for root HUB */
3223 	if (udev->parent_hub == NULL)
3224 		return;
3225 repeat:
3226 
3227 	xfer = udev->ctrl_xfer[0];
3228 	if (xfer) {
3229 		USB_XFER_LOCK(xfer);
3230 		no_resetup =
3231 		    ((xfer->address == udev->address) &&
3232 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3233 		    udev->ddesc.bMaxPacketSize));
3234 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3235 			if (no_resetup) {
3236 				/*
3237 				 * NOTE: checking "xfer->address" and
3238 				 * starting the USB transfer must be
3239 				 * atomic!
3240 				 */
3241 				usbd_transfer_start(xfer);
3242 			}
3243 		}
3244 		USB_XFER_UNLOCK(xfer);
3245 	} else {
3246 		no_resetup = 0;
3247 	}
3248 
3249 	if (no_resetup) {
3250 		/*
3251 	         * All parameters are exactly the same like before.
3252 	         * Just return.
3253 	         */
3254 		return;
3255 	}
3256 	/*
3257 	 * Update wMaxPacketSize for the default control endpoint:
3258 	 */
3259 	udev->ctrl_ep_desc.wMaxPacketSize[0] =
3260 	    udev->ddesc.bMaxPacketSize;
3261 
3262 	/*
3263 	 * Unsetup any existing USB transfer:
3264 	 */
3265 	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3266 
3267 	/*
3268 	 * Reset clear stall error counter.
3269 	 */
3270 	udev->clear_stall_errors = 0;
3271 
3272 	/*
3273 	 * Try to setup a new USB transfer for the
3274 	 * default control endpoint:
3275 	 */
3276 	iface_index = 0;
3277 	if (usbd_transfer_setup(udev, &iface_index,
3278 	    udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3279 	    usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3280 	    &udev->device_mtx)) {
3281 		DPRINTFN(0, "could not setup default "
3282 		    "USB transfer\n");
3283 	} else {
3284 		goto repeat;
3285 	}
3286 }
3287 
3288 /*------------------------------------------------------------------------*
3289  *	usbd_clear_data_toggle - factored out code
3290  *
3291  * NOTE: the intention of this function is not to reset the hardware
3292  * data toggle.
3293  *------------------------------------------------------------------------*/
3294 void
3295 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3296 {
3297 	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3298 
3299 	/* check that we have a valid case */
3300 	if (udev->flags.usb_mode == USB_MODE_HOST &&
3301 	    udev->parent_hub != NULL &&
3302 	    udev->bus->methods->clear_stall != NULL &&
3303 	    ep->methods != NULL) {
3304 		(udev->bus->methods->clear_stall) (udev, ep);
3305 	}
3306 }
3307 
3308 /*------------------------------------------------------------------------*
3309  *	usbd_clear_data_toggle - factored out code
3310  *
3311  * NOTE: the intention of this function is not to reset the hardware
3312  * data toggle on the USB device side.
3313  *------------------------------------------------------------------------*/
3314 void
3315 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3316 {
3317 	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3318 
3319 	USB_BUS_LOCK(udev->bus);
3320 	ep->toggle_next = 0;
3321 	/* some hardware needs a callback to clear the data toggle */
3322 	usbd_clear_stall_locked(udev, ep);
3323 	USB_BUS_UNLOCK(udev->bus);
3324 }
3325 
3326 /*------------------------------------------------------------------------*
3327  *	usbd_clear_stall_callback - factored out clear stall callback
3328  *
3329  * Input parameters:
3330  *  xfer1: Clear Stall Control Transfer
3331  *  xfer2: Stalled USB Transfer
3332  *
3333  * This function is NULL safe.
3334  *
3335  * Return values:
3336  *   0: In progress
3337  *   Else: Finished
3338  *
3339  * Clear stall config example:
3340  *
3341  * static const struct usb_config my_clearstall =  {
3342  *	.type = UE_CONTROL,
3343  *	.endpoint = 0,
3344  *	.direction = UE_DIR_ANY,
3345  *	.interval = 50, //50 milliseconds
3346  *	.bufsize = sizeof(struct usb_device_request),
3347  *	.timeout = 1000, //1.000 seconds
3348  *	.callback = &my_clear_stall_callback, // **
3349  *	.usb_mode = USB_MODE_HOST,
3350  * };
3351  *
3352  * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3353  * passing the correct parameters.
3354  *------------------------------------------------------------------------*/
3355 uint8_t
3356 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3357     struct usb_xfer *xfer2)
3358 {
3359 	struct usb_device_request req;
3360 
3361 	if (xfer2 == NULL) {
3362 		/* looks like we are tearing down */
3363 		DPRINTF("NULL input parameter\n");
3364 		return (0);
3365 	}
3366 	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3367 	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3368 
3369 	switch (USB_GET_STATE(xfer1)) {
3370 	case USB_ST_SETUP:
3371 
3372 		/*
3373 		 * pre-clear the data toggle to DATA0 ("umass.c" and
3374 		 * "ata-usb.c" depends on this)
3375 		 */
3376 
3377 		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3378 
3379 		/* setup a clear-stall packet */
3380 
3381 		req.bmRequestType = UT_WRITE_ENDPOINT;
3382 		req.bRequest = UR_CLEAR_FEATURE;
3383 		USETW(req.wValue, UF_ENDPOINT_HALT);
3384 		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3385 		req.wIndex[1] = 0;
3386 		USETW(req.wLength, 0);
3387 
3388 		/*
3389 		 * "usbd_transfer_setup_sub()" will ensure that
3390 		 * we have sufficient room in the buffer for
3391 		 * the request structure!
3392 		 */
3393 
3394 		/* copy in the transfer */
3395 
3396 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3397 
3398 		/* set length */
3399 		xfer1->frlengths[0] = sizeof(req);
3400 		xfer1->nframes = 1;
3401 
3402 		usbd_transfer_submit(xfer1);
3403 		return (0);
3404 
3405 	case USB_ST_TRANSFERRED:
3406 		break;
3407 
3408 	default:			/* Error */
3409 		if (xfer1->error == USB_ERR_CANCELLED) {
3410 			return (0);
3411 		}
3412 		break;
3413 	}
3414 	return (1);			/* Clear Stall Finished */
3415 }
3416 
3417 /*------------------------------------------------------------------------*
3418  *	usbd_transfer_poll
3419  *
3420  * The following function gets called from the USB keyboard driver and
3421  * UMASS when the system has panicked.
3422  *
3423  * NOTE: It is currently not possible to resume normal operation on
3424  * the USB controller which has been polled, due to clearing of the
3425  * "up_dsleep" and "up_msleep" flags.
3426  *------------------------------------------------------------------------*/
3427 void
3428 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3429 {
3430 	struct usb_xfer *xfer;
3431 	struct usb_xfer_root *xroot;
3432 	struct usb_device *udev;
3433 	struct usb_proc_msg *pm;
3434 	struct usb_bus *bus;
3435 	uint16_t n;
3436 	uint16_t drop_bus_spin;
3437 	uint16_t drop_bus;
3438 	uint16_t drop_xfer;
3439 
3440 	for (n = 0; n != max; n++) {
3441 		/* Extra checks to avoid panic */
3442 		xfer = ppxfer[n];
3443 		if (xfer == NULL)
3444 			continue;	/* no USB transfer */
3445 		xroot = xfer->xroot;
3446 		if (xroot == NULL)
3447 			continue;	/* no USB root */
3448 		udev = xroot->udev;
3449 		if (udev == NULL)
3450 			continue;	/* no USB device */
3451 		bus = udev->bus;
3452 		if (bus == NULL)
3453 			continue;	/* no BUS structure */
3454 		if (bus->methods == NULL)
3455 			continue;	/* no BUS methods */
3456 		if (bus->methods->xfer_poll == NULL)
3457 			continue;	/* no poll method */
3458 
3459 		drop_bus_spin = 0;
3460 		drop_bus = 0;
3461 		drop_xfer = 0;
3462 
3463 		if (USB_IN_POLLING_MODE_FUNC() == 0) {
3464 			/* make sure that the BUS spin mutex is not locked */
3465 			while (mtx_owned(&bus->bus_spin_lock)) {
3466 				mtx_unlock_spin(&bus->bus_spin_lock);
3467 				drop_bus_spin++;
3468 			}
3469 
3470 			/* make sure that the BUS mutex is not locked */
3471 			while (mtx_owned(&bus->bus_mtx)) {
3472 				mtx_unlock(&bus->bus_mtx);
3473 				drop_bus++;
3474 			}
3475 
3476 			/* make sure that the transfer mutex is not locked */
3477 			while (mtx_owned(xroot->xfer_mtx)) {
3478 				mtx_unlock(xroot->xfer_mtx);
3479 				drop_xfer++;
3480 			}
3481 		}
3482 
3483 		/* Make sure cv_signal() and cv_broadcast() is not called */
3484 		USB_BUS_CONTROL_XFER_PROC(bus)->up_msleep = 0;
3485 		USB_BUS_EXPLORE_PROC(bus)->up_msleep = 0;
3486 		USB_BUS_GIANT_PROC(bus)->up_msleep = 0;
3487 		USB_BUS_NON_GIANT_ISOC_PROC(bus)->up_msleep = 0;
3488 		USB_BUS_NON_GIANT_BULK_PROC(bus)->up_msleep = 0;
3489 
3490 		/* poll USB hardware */
3491 		(bus->methods->xfer_poll) (bus);
3492 
3493 		USB_BUS_LOCK(xroot->bus);
3494 
3495 		/* check for clear stall */
3496 		if (udev->ctrl_xfer[1] != NULL) {
3497 			/* poll clear stall start */
3498 			pm = &udev->cs_msg[0].hdr;
3499 			(pm->pm_callback) (pm);
3500 			/* poll clear stall done thread */
3501 			pm = &udev->ctrl_xfer[1]->
3502 			    xroot->done_m[0].hdr;
3503 			(pm->pm_callback) (pm);
3504 		}
3505 
3506 		/* poll done thread */
3507 		pm = &xroot->done_m[0].hdr;
3508 		(pm->pm_callback) (pm);
3509 
3510 		USB_BUS_UNLOCK(xroot->bus);
3511 
3512 		/* restore transfer mutex */
3513 		while (drop_xfer--)
3514 			mtx_lock(xroot->xfer_mtx);
3515 
3516 		/* restore BUS mutex */
3517 		while (drop_bus--)
3518 			mtx_lock(&bus->bus_mtx);
3519 
3520 		/* restore BUS spin mutex */
3521 		while (drop_bus_spin--)
3522 			mtx_lock_spin(&bus->bus_spin_lock);
3523 	}
3524 }
3525 
3526 static void
3527 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3528     uint8_t type, enum usb_dev_speed speed)
3529 {
3530 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3531 		[USB_SPEED_LOW] = 8,
3532 		[USB_SPEED_FULL] = 64,
3533 		[USB_SPEED_HIGH] = 1024,
3534 		[USB_SPEED_VARIABLE] = 1024,
3535 		[USB_SPEED_SUPER] = 1024,
3536 	};
3537 
3538 	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3539 		[USB_SPEED_LOW] = 0,	/* invalid */
3540 		[USB_SPEED_FULL] = 1023,
3541 		[USB_SPEED_HIGH] = 1024,
3542 		[USB_SPEED_VARIABLE] = 3584,
3543 		[USB_SPEED_SUPER] = 1024,
3544 	};
3545 
3546 	static const uint16_t control_min[USB_SPEED_MAX] = {
3547 		[USB_SPEED_LOW] = 8,
3548 		[USB_SPEED_FULL] = 8,
3549 		[USB_SPEED_HIGH] = 64,
3550 		[USB_SPEED_VARIABLE] = 512,
3551 		[USB_SPEED_SUPER] = 512,
3552 	};
3553 
3554 	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3555 		[USB_SPEED_LOW] = 8,
3556 		[USB_SPEED_FULL] = 8,
3557 		[USB_SPEED_HIGH] = 512,
3558 		[USB_SPEED_VARIABLE] = 512,
3559 		[USB_SPEED_SUPER] = 1024,
3560 	};
3561 
3562 	uint16_t temp;
3563 
3564 	memset(ptr, 0, sizeof(*ptr));
3565 
3566 	switch (type) {
3567 	case UE_INTERRUPT:
3568 		ptr->range.max = intr_range_max[speed];
3569 		break;
3570 	case UE_ISOCHRONOUS:
3571 		ptr->range.max = isoc_range_max[speed];
3572 		break;
3573 	default:
3574 		if (type == UE_BULK)
3575 			temp = bulk_min[speed];
3576 		else /* UE_CONTROL */
3577 			temp = control_min[speed];
3578 
3579 		/* default is fixed */
3580 		ptr->fixed[0] = temp;
3581 		ptr->fixed[1] = temp;
3582 		ptr->fixed[2] = temp;
3583 		ptr->fixed[3] = temp;
3584 
3585 		if (speed == USB_SPEED_FULL) {
3586 			/* multiple sizes */
3587 			ptr->fixed[1] = 16;
3588 			ptr->fixed[2] = 32;
3589 			ptr->fixed[3] = 64;
3590 		}
3591 		if ((speed == USB_SPEED_VARIABLE) &&
3592 		    (type == UE_BULK)) {
3593 			/* multiple sizes */
3594 			ptr->fixed[2] = 1024;
3595 			ptr->fixed[3] = 1536;
3596 		}
3597 		break;
3598 	}
3599 }
3600 
3601 void	*
3602 usbd_xfer_softc(struct usb_xfer *xfer)
3603 {
3604 	return (xfer->priv_sc);
3605 }
3606 
3607 void *
3608 usbd_xfer_get_priv(struct usb_xfer *xfer)
3609 {
3610 	return (xfer->priv_fifo);
3611 }
3612 
3613 void
3614 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3615 {
3616 	xfer->priv_fifo = ptr;
3617 }
3618 
3619 uint8_t
3620 usbd_xfer_state(struct usb_xfer *xfer)
3621 {
3622 	return (xfer->usb_state);
3623 }
3624 
3625 void
3626 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3627 {
3628 	switch (flag) {
3629 		case USB_FORCE_SHORT_XFER:
3630 			xfer->flags.force_short_xfer = 1;
3631 			break;
3632 		case USB_SHORT_XFER_OK:
3633 			xfer->flags.short_xfer_ok = 1;
3634 			break;
3635 		case USB_MULTI_SHORT_OK:
3636 			xfer->flags.short_frames_ok = 1;
3637 			break;
3638 		case USB_MANUAL_STATUS:
3639 			xfer->flags.manual_status = 1;
3640 			break;
3641 	}
3642 }
3643 
3644 void
3645 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3646 {
3647 	switch (flag) {
3648 		case USB_FORCE_SHORT_XFER:
3649 			xfer->flags.force_short_xfer = 0;
3650 			break;
3651 		case USB_SHORT_XFER_OK:
3652 			xfer->flags.short_xfer_ok = 0;
3653 			break;
3654 		case USB_MULTI_SHORT_OK:
3655 			xfer->flags.short_frames_ok = 0;
3656 			break;
3657 		case USB_MANUAL_STATUS:
3658 			xfer->flags.manual_status = 0;
3659 			break;
3660 	}
3661 }
3662 
3663 /*
3664  * The following function returns in milliseconds when the isochronous
3665  * transfer was completed by the hardware. The returned value wraps
3666  * around 65536 milliseconds.
3667  */
3668 uint16_t
3669 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3670 {
3671 	return (xfer->isoc_time_complete);
3672 }
3673 
3674 /*
3675  * The following function returns non-zero if the max packet size
3676  * field was clamped to a valid value. Else it returns zero.
3677  */
3678 uint8_t
3679 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3680 {
3681 	return (xfer->flags_int.maxp_was_clamped);
3682 }
3683 
3684 /*
3685  * The following function computes the next isochronous frame number
3686  * where the first isochronous packet should be queued.
3687  *
3688  * The function returns non-zero if there was a discontinuity.
3689  * Else zero is returned for normal operation.
3690  */
3691 uint8_t
3692 usbd_xfer_get_isochronous_start_frame(struct usb_xfer *xfer, uint32_t frame_curr,
3693     uint32_t frame_min, uint32_t frame_ms, uint32_t frame_mask, uint32_t *p_frame_start)
3694 {
3695 	uint32_t duration;
3696 	uint32_t delta;
3697 	uint8_t retval;
3698 	uint8_t shift;
3699 
3700 	/* Compute time ahead of current schedule. */
3701 	delta = (xfer->endpoint->isoc_next - frame_curr) & frame_mask;
3702 
3703 	/*
3704 	 * Check if it is the first transfer or if the future frame
3705 	 * delta is less than one millisecond or if the frame delta is
3706 	 * negative:
3707 	 */
3708 	if (xfer->endpoint->is_synced == 0 ||
3709 	    delta < (frame_ms + frame_min) ||
3710 	    delta > (frame_mask / 2)) {
3711 		/* Schedule transfer 2 milliseconds into the future. */
3712 		xfer->endpoint->isoc_next = (frame_curr + 2 * frame_ms + frame_min) & frame_mask;
3713 		xfer->endpoint->is_synced = 1;
3714 
3715 		retval = 1;
3716 	} else {
3717 		retval = 0;
3718 	}
3719 
3720 	/* Store start time, if any. */
3721 	if (p_frame_start != NULL)
3722 		*p_frame_start = xfer->endpoint->isoc_next & frame_mask;
3723 
3724 	/* Get relative completion time, in milliseconds. */
3725 	delta = xfer->endpoint->isoc_next - frame_curr + (frame_curr % frame_ms);
3726 	delta &= frame_mask;
3727 	delta /= frame_ms;
3728 
3729 	switch (usbd_get_speed(xfer->xroot->udev)) {
3730 	case USB_SPEED_FULL:
3731 		shift = 3;
3732 		break;
3733 	default:
3734 		shift = usbd_xfer_get_fps_shift(xfer);
3735 		break;
3736 	}
3737 
3738 	/* Get duration in milliseconds, rounded up. */
3739 	duration = ((xfer->nframes << shift) + 7) / 8;
3740 
3741 	/* Compute full 32-bit completion time, in milliseconds. */
3742 	xfer->isoc_time_complete =
3743 	    usb_isoc_time_expand(xfer->xroot->bus, frame_curr / frame_ms) +
3744 	    delta + duration;
3745 
3746 	/* Compute next isochronous frame. */
3747 	xfer->endpoint->isoc_next += duration * frame_ms;
3748 	xfer->endpoint->isoc_next &= frame_mask;
3749 
3750 	return (retval);
3751 }
3752