1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008-2021 Hans Petter Selasky. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #ifdef USB_GLOBAL_INCLUDE_FILE
29 #include USB_GLOBAL_INCLUDE_FILE
30 #else
31 #include <sys/stdint.h>
32 #include <sys/stddef.h>
33 #include <sys/param.h>
34 #include <sys/queue.h>
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/condvar.h>
43 #include <sys/sysctl.h>
44 #include <sys/sx.h>
45 #include <sys/unistd.h>
46 #include <sys/callout.h>
47 #include <sys/malloc.h>
48 #include <sys/priv.h>
49
50 #include <dev/usb/usb.h>
51 #include <dev/usb/usbdi.h>
52 #include <dev/usb/usbdi_util.h>
53
54 #define USB_DEBUG_VAR usb_debug
55
56 #include <dev/usb/usb_core.h>
57 #include <dev/usb/usb_busdma.h>
58 #include <dev/usb/usb_process.h>
59 #include <dev/usb/usb_transfer.h>
60 #include <dev/usb/usb_device.h>
61 #include <dev/usb/usb_debug.h>
62 #include <dev/usb/usb_util.h>
63
64 #include <dev/usb/usb_controller.h>
65 #include <dev/usb/usb_bus.h>
66 #include <dev/usb/usb_pf.h>
67 #endif /* USB_GLOBAL_INCLUDE_FILE */
68
69 struct usb_std_packet_size {
70 struct {
71 uint16_t min; /* inclusive */
72 uint16_t max; /* inclusive */
73 } range;
74
75 uint16_t fixed[4];
76 };
77
78 static usb_callback_t usb_request_callback;
79
80 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
81 /* This transfer is used for generic control endpoint transfers */
82
83 [0] = {
84 .type = UE_CONTROL,
85 .endpoint = 0x00, /* Control endpoint */
86 .direction = UE_DIR_ANY,
87 .bufsize = USB_EP0_BUFSIZE, /* bytes */
88 .flags = {.proxy_buffer = 1,},
89 .callback = &usb_request_callback,
90 .usb_mode = USB_MODE_DUAL, /* both modes */
91 },
92
93 /* This transfer is used for generic clear stall only */
94
95 [1] = {
96 .type = UE_CONTROL,
97 .endpoint = 0x00, /* Control pipe */
98 .direction = UE_DIR_ANY,
99 .bufsize = sizeof(struct usb_device_request),
100 .callback = &usb_do_clear_stall_callback,
101 .timeout = 1000, /* 1 second */
102 .interval = 50, /* 50ms */
103 .usb_mode = USB_MODE_HOST,
104 },
105 };
106
107 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
108 /* This transfer is used for generic control endpoint transfers */
109
110 [0] = {
111 .type = UE_CONTROL,
112 .endpoint = 0x00, /* Control endpoint */
113 .direction = UE_DIR_ANY,
114 .bufsize = 65535, /* bytes */
115 .callback = &usb_request_callback,
116 .usb_mode = USB_MODE_DUAL, /* both modes */
117 },
118
119 /* This transfer is used for generic clear stall only */
120
121 [1] = {
122 .type = UE_CONTROL,
123 .endpoint = 0x00, /* Control pipe */
124 .direction = UE_DIR_ANY,
125 .bufsize = sizeof(struct usb_device_request),
126 .callback = &usb_do_clear_stall_callback,
127 .timeout = 1000, /* 1 second */
128 .interval = 50, /* 50ms */
129 .usb_mode = USB_MODE_HOST,
130 },
131 };
132
133 /* function prototypes */
134
135 static void usbd_update_max_frame_size(struct usb_xfer *);
136 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
137 static void usbd_control_transfer_init(struct usb_xfer *);
138 static int usbd_setup_ctrl_transfer(struct usb_xfer *);
139 static void usb_callback_proc(struct usb_proc_msg *);
140 static void usbd_callback_ss_done_defer(struct usb_xfer *);
141 static void usbd_callback_wrapper(struct usb_xfer_queue *);
142 static void usbd_transfer_start_cb(void *);
143 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
144 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
145 uint8_t type, enum usb_dev_speed speed);
146
147 /*------------------------------------------------------------------------*
148 * usb_request_callback
149 *------------------------------------------------------------------------*/
150 static void
151 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
152 {
153 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
154 usb_handle_request_callback(xfer, error);
155 else
156 usbd_do_request_callback(xfer, error);
157 }
158
159 /*------------------------------------------------------------------------*
160 * usbd_update_max_frame_size
161 *
162 * This function updates the maximum frame size, since high speed USB
163 * can transfer multiple consecutive packets.
164 *------------------------------------------------------------------------*/
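/*
 * Worked example (for illustration only): a high-speed, high-bandwidth
 * isochronous endpoint reporting 1024-byte packets and 3 packets per
 * microframe gets a maximum frame size of 3 * 1024 = 3072 bytes.
 */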
165 static void
166 usbd_update_max_frame_size(struct usb_xfer *xfer)
167 {
168 /* compute maximum frame size */
169 /* this computation should not overflow 16-bit */
170 /* max = 15 * 1024 */
171
172 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
173 }
174
175 /*------------------------------------------------------------------------*
176 * usbd_get_dma_delay
177 *
178 * The following function is called when we need to
179 * synchronize with DMA hardware.
180 *
181 * Returns:
182 * 0: no DMA delay required
183 * Else: milliseconds of DMA delay
184 *------------------------------------------------------------------------*/
185 usb_timeout_t
186 usbd_get_dma_delay(struct usb_device *udev)
187 {
188 const struct usb_bus_methods *mtod;
189 uint32_t temp;
190
191 mtod = udev->bus->methods;
192 temp = 0;
193
194 if (mtod->get_dma_delay) {
195 (mtod->get_dma_delay) (udev, &temp);
196 /*
197 * Round up and convert to milliseconds. Note that we use
198 * 1024 microseconds per millisecond to save a division.
199 */
200 temp += 0x3FF;
201 temp /= 0x400;
202 }
203 return (temp);
204 }
205
206 /*------------------------------------------------------------------------*
207 * usbd_transfer_setup_sub_malloc
208 *
209 * This function will allocate one or more DMA'able memory chunks
210 * according to "size", "align" and "count" arguments. "ppc" is
211 * pointed to a linear array of USB page caches afterwards.
212 *
213 * If the "align" argument is equal to "1" a non-contiguous allocation
214 * can happen. Else if the "align" argument is greater than "1", the
215 * allocation will always be contiguous in memory.
216 *
217 * Returns:
218 * 0: Success
219 * Else: Failure
220 *------------------------------------------------------------------------*/
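/*
 * Usage sketch (hypothetical controller driver, illustration only):
 * reserve "n" transfer descriptors of "td_size" bytes, each aligned
 * to "td_align" bytes, from the controller's "xfer_setup" method:
 *
 *	if (usbd_transfer_setup_sub_malloc(parm, &pc,
 *	    td_size, td_align, n)) {
 *		parm->err = USB_ERR_NOMEM;
 *		return;
 *	}
 */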
221 #if USB_HAVE_BUSDMA
222 uint8_t
223 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
224 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
225 usb_size_t count)
226 {
227 struct usb_page_cache *pc;
228 struct usb_page *pg;
229 void *buf;
230 usb_size_t n_dma_pc;
231 usb_size_t n_dma_pg;
232 usb_size_t n_obj;
233 usb_size_t x;
234 usb_size_t y;
235 usb_size_t r;
236 usb_size_t z;
237
238 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
239 align));
240 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
241
242 if (count == 0) {
243 return (0); /* nothing to allocate */
244 }
245 /*
246 * Make sure that the size is aligned properly.
247 */
248 size = -((-size) & (-align));
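/*
 * The expression above rounds "size" up to the next multiple of
 * "align" (assumed to be a power of two) using two's complement
 * arithmetic. Worked example: size = 13 and align = 8 gives 16,
 * while a "size" that is already aligned is left unchanged.
 */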
249
250 /*
251 * Try multi-allocation chunks to reduce the number of DMA
252 * allocations, since DMA allocations are slow.
253 */
254 if (align == 1) {
255 /* special case - non-cached multi page DMA memory */
256 n_dma_pc = count;
257 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
258 n_obj = 1;
259 } else if (size >= USB_PAGE_SIZE) {
260 n_dma_pc = count;
261 n_dma_pg = 1;
262 n_obj = 1;
263 } else {
264 /* compute number of objects per page */
265 #ifdef USB_DMA_SINGLE_ALLOC
266 n_obj = 1;
267 #else
268 n_obj = (USB_PAGE_SIZE / size);
269 #endif
270 /*
271 * Compute number of DMA chunks, rounded up
272 * to nearest one:
273 */
274 n_dma_pc = howmany(count, n_obj);
275 n_dma_pg = 1;
276 }
277
278 /*
279 * DMA memory is allocated once, but mapped twice. That's why
280 * there is one list for auto-free and another list for
281 * non-auto-free which only holds the mapping and not the
282 * allocation.
283 */
284 if (parm->buf == NULL) {
285 /* reserve memory (auto-free) */
286 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
287 parm->dma_page_cache_ptr += n_dma_pc;
288
289 /* reserve memory (no-auto-free) */
290 parm->dma_page_ptr += count * n_dma_pg;
291 parm->xfer_page_cache_ptr += count;
292 return (0);
293 }
294 for (x = 0; x != n_dma_pc; x++) {
295 /* need to initialize the page cache */
296 parm->dma_page_cache_ptr[x].tag_parent =
297 &parm->curr_xfer->xroot->dma_parent_tag;
298 }
299 for (x = 0; x != count; x++) {
300 /* need to initialize the page cache */
301 parm->xfer_page_cache_ptr[x].tag_parent =
302 &parm->curr_xfer->xroot->dma_parent_tag;
303 }
304
305 if (ppc != NULL) {
306 if (n_obj != 1)
307 *ppc = parm->xfer_page_cache_ptr;
308 else
309 *ppc = parm->dma_page_cache_ptr;
310 }
311 r = count; /* set remainder count */
312 z = n_obj * size; /* set allocation size */
313 pc = parm->xfer_page_cache_ptr;
314 pg = parm->dma_page_ptr;
315
316 if (n_obj == 1) {
317 /*
318 * Avoid mapping memory twice if only a single object
319 * should be allocated per page cache:
320 */
321 for (x = 0; x != n_dma_pc; x++) {
322 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
323 pg, z, align)) {
324 return (1); /* failure */
325 }
326 /* Make room for one DMA page cache and "n_dma_pg" pages */
327 parm->dma_page_cache_ptr++;
328 pg += n_dma_pg;
329 }
330 } else {
331 for (x = 0; x != n_dma_pc; x++) {
332 if (r < n_obj) {
333 /* compute last remainder */
334 z = r * size;
335 n_obj = r;
336 }
337 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
338 pg, z, align)) {
339 return (1); /* failure */
340 }
341 /* Set beginning of current buffer */
342 buf = parm->dma_page_cache_ptr->buffer;
343 /* Make room for one DMA page cache and "n_dma_pg" pages */
344 parm->dma_page_cache_ptr++;
345 pg += n_dma_pg;
346
347 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
348 /* Load sub-chunk into DMA */
349 if (usb_pc_dmamap_create(pc, size)) {
350 return (1); /* failure */
351 }
352 pc->buffer = USB_ADD_BYTES(buf, y * size);
353 pc->page_start = pg;
354
355 USB_MTX_LOCK(pc->tag_parent->mtx);
356 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
357 USB_MTX_UNLOCK(pc->tag_parent->mtx);
358 return (1); /* failure */
359 }
360 USB_MTX_UNLOCK(pc->tag_parent->mtx);
361 }
362 }
363 }
364
365 parm->xfer_page_cache_ptr = pc;
366 parm->dma_page_ptr = pg;
367 return (0);
368 }
369 #endif
370
371 /*------------------------------------------------------------------------*
372 * usbd_get_max_frame_length
373 *
374 * This function returns the maximum single frame length as computed by
375 * usbd_transfer_setup(). It is useful when computing buffer sizes for
376 * devices having multiple alternate settings. The SuperSpeed endpoint
377 * companion pointer is allowed to be NULL.
378 *------------------------------------------------------------------------*/
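/*
 * Usage sketch (illustrative, hypothetical descriptor pointers): when
 * sizing a buffer that must fit any alternate setting, keep the
 * largest value returned across the endpoint descriptors of interest:
 *
 *	len = usbd_get_max_frame_length(edesc, ecomp, speed);
 *	if (len > max_len)
 *		max_len = len;
 */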
379 uint32_t
380 usbd_get_max_frame_length(const struct usb_endpoint_descriptor *edesc,
381 const struct usb_endpoint_ss_comp_descriptor *ecomp,
382 enum usb_dev_speed speed)
383 {
384 uint32_t max_packet_size;
385 uint32_t max_packet_count;
386 uint8_t type;
387
388 max_packet_size = UGETW(edesc->wMaxPacketSize);
389 max_packet_count = 1;
390 type = (edesc->bmAttributes & UE_XFERTYPE);
391
392 switch (speed) {
393 case USB_SPEED_HIGH:
394 switch (type) {
395 case UE_ISOCHRONOUS:
396 case UE_INTERRUPT:
397 max_packet_count +=
398 (max_packet_size >> 11) & 3;
399
400 /* check for invalid max packet count */
401 if (max_packet_count > 3)
402 max_packet_count = 3;
403 break;
404 default:
405 break;
406 }
407 max_packet_size &= 0x7FF;
408 break;
409 case USB_SPEED_SUPER:
410 max_packet_count += (max_packet_size >> 11) & 3;
411
412 if (ecomp != NULL)
413 max_packet_count += ecomp->bMaxBurst;
414
415 if ((max_packet_count == 0) ||
416 (max_packet_count > 16))
417 max_packet_count = 16;
418
419 switch (type) {
420 case UE_CONTROL:
421 max_packet_count = 1;
422 break;
423 case UE_ISOCHRONOUS:
424 if (ecomp != NULL) {
425 uint8_t mult;
426
427 mult = UE_GET_SS_ISO_MULT(
428 ecomp->bmAttributes) + 1;
429 if (mult > 3)
430 mult = 3;
431
432 max_packet_count *= mult;
433 }
434 break;
435 default:
436 break;
437 }
438 max_packet_size &= 0x7FF;
439 break;
440 default:
441 break;
442 }
443 return (max_packet_size * max_packet_count);
444 }
445
446 /*------------------------------------------------------------------------*
447 * usbd_transfer_setup_sub - transfer setup subroutine
448 *
449 * This function must be called from the "xfer_setup" callback of the
450 * USB Host or Device controller driver when setting up a USB
451 * transfer. This function will set up the correct packet sizes, buffer
452 * sizes, flags and more, which are stored in the "usb_xfer"
453 * structure.
454 *------------------------------------------------------------------------*/
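/*
 * Call sketch from a hypothetical controller "xfer_setup" method
 * (the limits shown are illustrative and not taken from any real
 * controller):
 *
 *	parm->hc_max_packet_size = 0x400;
 *	parm->hc_max_packet_count = 1;
 *	parm->hc_max_frame_size = 0x400;
 *	usbd_transfer_setup_sub(parm);
 */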
455 void
456 usbd_transfer_setup_sub(struct usb_setup_params *parm)
457 {
458 enum {
459 REQ_SIZE = 8,
460 MIN_PKT = 8,
461 };
462 struct usb_xfer *xfer = parm->curr_xfer;
463 const struct usb_config *setup = parm->curr_setup;
464 struct usb_endpoint_ss_comp_descriptor *ecomp;
465 struct usb_endpoint_descriptor *edesc;
466 struct usb_std_packet_size std_size;
467 usb_frcount_t n_frlengths;
468 usb_frcount_t n_frbuffers;
469 usb_frcount_t x;
470 uint16_t maxp_old;
471 uint8_t type;
472 uint8_t zmps;
473
474 /*
475 * Sanity check. The following parameters must be initialized before
476 * calling this function.
477 */
478 if ((parm->hc_max_packet_size == 0) ||
479 (parm->hc_max_packet_count == 0) ||
480 (parm->hc_max_frame_size == 0)) {
481 parm->err = USB_ERR_INVAL;
482 goto done;
483 }
484 edesc = xfer->endpoint->edesc;
485 ecomp = xfer->endpoint->ecomp;
486
487 type = (edesc->bmAttributes & UE_XFERTYPE);
488
489 xfer->flags = setup->flags;
490 xfer->nframes = setup->frames;
491 xfer->timeout = setup->timeout;
492 xfer->callback = setup->callback;
493 xfer->interval = setup->interval;
494 xfer->endpointno = edesc->bEndpointAddress;
495 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
496 xfer->max_packet_count = 1;
497 /* make a shadow copy: */
498 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
499
500 parm->bufsize = setup->bufsize;
501
502 switch (parm->speed) {
503 case USB_SPEED_HIGH:
504 switch (type) {
505 case UE_ISOCHRONOUS:
506 case UE_INTERRUPT:
507 xfer->max_packet_count +=
508 (xfer->max_packet_size >> 11) & 3;
509
510 /* check for invalid max packet count */
511 if (xfer->max_packet_count > 3)
512 xfer->max_packet_count = 3;
513 break;
514 default:
515 break;
516 }
517 xfer->max_packet_size &= 0x7FF;
518 break;
519 case USB_SPEED_SUPER:
520 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
521
522 if (ecomp != NULL)
523 xfer->max_packet_count += ecomp->bMaxBurst;
524
525 if ((xfer->max_packet_count == 0) ||
526 (xfer->max_packet_count > 16))
527 xfer->max_packet_count = 16;
528
529 switch (type) {
530 case UE_CONTROL:
531 xfer->max_packet_count = 1;
532 break;
533 case UE_ISOCHRONOUS:
534 if (ecomp != NULL) {
535 uint8_t mult;
536
537 mult = UE_GET_SS_ISO_MULT(
538 ecomp->bmAttributes) + 1;
539 if (mult > 3)
540 mult = 3;
541
542 xfer->max_packet_count *= mult;
543 }
544 break;
545 default:
546 break;
547 }
548 xfer->max_packet_size &= 0x7FF;
549 break;
550 default:
551 break;
552 }
553 /* range check "max_packet_count" */
554
555 if (xfer->max_packet_count > parm->hc_max_packet_count) {
556 xfer->max_packet_count = parm->hc_max_packet_count;
557 }
558
559 /* store max packet size value before filtering */
560
561 maxp_old = xfer->max_packet_size;
562
563 /* filter "wMaxPacketSize" according to HC capabilities */
564
565 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
566 (xfer->max_packet_size == 0)) {
567 xfer->max_packet_size = parm->hc_max_packet_size;
568 }
569 /* filter "wMaxPacketSize" according to standard sizes */
570
571 usbd_get_std_packet_size(&std_size, type, parm->speed);
572
573 if (std_size.range.min || std_size.range.max) {
574 if (xfer->max_packet_size < std_size.range.min) {
575 xfer->max_packet_size = std_size.range.min;
576 }
577 if (xfer->max_packet_size > std_size.range.max) {
578 xfer->max_packet_size = std_size.range.max;
579 }
580 } else {
581 if (xfer->max_packet_size >= std_size.fixed[3]) {
582 xfer->max_packet_size = std_size.fixed[3];
583 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
584 xfer->max_packet_size = std_size.fixed[2];
585 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
586 xfer->max_packet_size = std_size.fixed[1];
587 } else {
588 /* only one possibility left */
589 xfer->max_packet_size = std_size.fixed[0];
590 }
591 }
592
593 /*
594 * Check if the max packet size was outside its allowed range
595 * and clamped to a valid value:
596 */
597 if (maxp_old != xfer->max_packet_size)
598 xfer->flags_int.maxp_was_clamped = 1;
599
600 /* compute "max_frame_size" */
601
602 usbd_update_max_frame_size(xfer);
603
604 /* check interrupt interval and transfer pre-delay */
605
606 if (type == UE_ISOCHRONOUS) {
607 uint16_t frame_limit;
608
609 xfer->interval = 0; /* not used, must be zero */
610 xfer->flags_int.isochronous_xfr = 1; /* set flag */
611
612 if (xfer->timeout == 0) {
613 /*
614 * set a default timeout in
615 * case something goes wrong!
616 */
617 xfer->timeout = 1000 / 4;
618 }
619 switch (parm->speed) {
620 case USB_SPEED_LOW:
621 case USB_SPEED_FULL:
622 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
623 xfer->fps_shift = 0;
624 break;
625 default:
626 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
627 xfer->fps_shift = edesc->bInterval;
628 if (xfer->fps_shift > 0)
629 xfer->fps_shift--;
630 if (xfer->fps_shift > 3)
631 xfer->fps_shift = 3;
632 if (xfer->flags.pre_scale_frames != 0)
633 xfer->nframes <<= (3 - xfer->fps_shift);
634 break;
635 }
636
637 if (xfer->nframes > frame_limit) {
638 /*
639 * this is not going to work
640 * cross hardware
641 */
642 parm->err = USB_ERR_INVAL;
643 goto done;
644 }
645 if (xfer->nframes == 0) {
646 /*
647 * this is not a valid value
648 */
649 parm->err = USB_ERR_ZERO_NFRAMES;
650 goto done;
651 }
652 } else {
653 /*
654 * If a value is specified use that else check the
655 * endpoint descriptor!
656 */
657 if (type == UE_INTERRUPT) {
658 uint32_t temp;
659
660 if (xfer->interval == 0) {
661 xfer->interval = edesc->bInterval;
662
663 switch (parm->speed) {
664 case USB_SPEED_LOW:
665 case USB_SPEED_FULL:
666 break;
667 default:
668 /* 125us -> 1ms */
669 if (xfer->interval < 4)
670 xfer->interval = 1;
671 else if (xfer->interval > 16)
672 xfer->interval = (1 << (16 - 4));
673 else
674 xfer->interval =
675 (1 << (xfer->interval - 4));
676 break;
677 }
678 }
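/*
 * Worked example for high/super speed: a bInterval of 4 maps
 * to a 1 ms interval, 7 maps to 8 ms, and values above 16 are
 * clamped to 4096 ms.
 */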
679
680 if (xfer->interval == 0) {
681 /*
682 * One millisecond is the smallest
683 * interval we support:
684 */
685 xfer->interval = 1;
686 }
687
688 xfer->fps_shift = 0;
689 temp = 1;
690
691 while ((temp != 0) && (temp < xfer->interval)) {
692 xfer->fps_shift++;
693 temp *= 2;
694 }
695
696 switch (parm->speed) {
697 case USB_SPEED_LOW:
698 case USB_SPEED_FULL:
699 break;
700 default:
701 xfer->fps_shift += 3;
702 break;
703 }
704 }
705 }
706
707 /*
708 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
709 * to be equal to zero when setting up USB transfers, since
710 * that would lead to a lot of extra code in the USB kernel.
711 */
712
713 if ((xfer->max_frame_size == 0) ||
714 (xfer->max_packet_size == 0)) {
715 zmps = 1;
716
717 if ((parm->bufsize <= MIN_PKT) &&
718 (type != UE_CONTROL) &&
719 (type != UE_BULK)) {
720 /* workaround */
721 xfer->max_packet_size = MIN_PKT;
722 xfer->max_packet_count = 1;
723 parm->bufsize = 0; /* automatic setup length */
724 usbd_update_max_frame_size(xfer);
725
726 } else {
727 parm->err = USB_ERR_ZERO_MAXP;
728 goto done;
729 }
730
731 } else {
732 zmps = 0;
733 }
734
735 /*
736 * check if we should setup a default
737 * length:
738 */
739
740 if (parm->bufsize == 0) {
741 parm->bufsize = xfer->max_frame_size;
742
743 if (type == UE_ISOCHRONOUS) {
744 parm->bufsize *= xfer->nframes;
745 }
746 }
747 /*
748 * check if we are about to setup a proxy
749 * type of buffer:
750 */
751
752 if (xfer->flags.proxy_buffer) {
753 /* round bufsize up */
754
755 parm->bufsize += (xfer->max_frame_size - 1);
756
757 if (parm->bufsize < xfer->max_frame_size) {
758 /* length wrapped around */
759 parm->err = USB_ERR_INVAL;
760 goto done;
761 }
762 /* subtract remainder */
763
764 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
765
766 /* add length of USB device request structure, if any */
767
768 if (type == UE_CONTROL) {
769 parm->bufsize += REQ_SIZE; /* SETUP message */
770 }
771 }
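/*
 * Worked example (illustration): with a proxy buffer, a requested
 * bufsize of 1000 bytes and a max_frame_size of 512 bytes is
 * rounded up to 1024 bytes, plus REQ_SIZE bytes for the SETUP
 * message if this is a control endpoint.
 */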
772 xfer->max_data_length = parm->bufsize;
773
774 /* Setup "n_frlengths" and "n_frbuffers" */
775
776 if (type == UE_ISOCHRONOUS) {
777 n_frlengths = xfer->nframes;
778 n_frbuffers = 1;
779 } else {
780 if (type == UE_CONTROL) {
781 xfer->flags_int.control_xfr = 1;
782 if (xfer->nframes == 0) {
783 if (parm->bufsize <= REQ_SIZE) {
784 /*
785 * there will never be any data
786 * stage
787 */
788 xfer->nframes = 1;
789 } else {
790 xfer->nframes = 2;
791 }
792 }
793 } else {
794 if (xfer->nframes == 0) {
795 xfer->nframes = 1;
796 }
797 }
798
799 n_frlengths = xfer->nframes;
800 n_frbuffers = xfer->nframes;
801 }
802
803 /*
804 * check if we have room for the
805 * USB device request structure:
806 */
807
808 if (type == UE_CONTROL) {
809 if (xfer->max_data_length < REQ_SIZE) {
810 /* length wrapped around or too small bufsize */
811 parm->err = USB_ERR_INVAL;
812 goto done;
813 }
814 xfer->max_data_length -= REQ_SIZE;
815 }
816 /*
817 * Setup "frlengths" and shadow "frlengths" for keeping the
818 * initial frame lengths when a USB transfer is complete. This
819 * information is useful when computing isochronous offsets.
820 */
821 xfer->frlengths = parm->xfer_length_ptr;
822 parm->xfer_length_ptr += 2 * n_frlengths;
823
824 /* setup "frbuffers" */
825 xfer->frbuffers = parm->xfer_page_cache_ptr;
826 parm->xfer_page_cache_ptr += n_frbuffers;
827
828 /* initialize max frame count */
829 xfer->max_frame_count = xfer->nframes;
830
831 /*
832 * check if we need to setup
833 * a local buffer:
834 */
835
836 if (!xfer->flags.ext_buffer) {
837 #if USB_HAVE_BUSDMA
838 struct usb_page_search page_info;
839 struct usb_page_cache *pc;
840
841 if (usbd_transfer_setup_sub_malloc(parm,
842 &pc, parm->bufsize, 1, 1)) {
843 parm->err = USB_ERR_NOMEM;
844 } else if (parm->buf != NULL) {
845 usbd_get_page(pc, 0, &page_info);
846
847 xfer->local_buffer = page_info.buffer;
848
849 usbd_xfer_set_frame_offset(xfer, 0, 0);
850
851 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
852 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
853 }
854 }
855 #else
856 /* align data */
857 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
858
859 if (parm->buf != NULL) {
860 xfer->local_buffer =
861 USB_ADD_BYTES(parm->buf, parm->size[0]);
862
863 usbd_xfer_set_frame_offset(xfer, 0, 0);
864
865 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
866 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
867 }
868 }
869 parm->size[0] += parm->bufsize;
870
871 /* align data again */
872 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
873 #endif
874 }
875 /*
876 * Compute maximum buffer size
877 */
878
879 if (parm->bufsize_max < parm->bufsize) {
880 parm->bufsize_max = parm->bufsize;
881 }
882 #if USB_HAVE_BUSDMA
883 if (xfer->flags_int.bdma_enable) {
884 /*
885 * Setup "dma_page_ptr".
886 *
887 * Proof for formula below:
888 *
889 * Assume there are three USB frames having length "a", "b" and
890 * "c". These USB frames will at maximum need "z"
891 * "usb_page" structures. "z" is given by:
892 *
893 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
894 * ((c / USB_PAGE_SIZE) + 2);
895 *
896 * Constraining "a", "b" and "c" like this:
897 *
898 * (a + b + c) <= parm->bufsize
899 *
900 * We know that:
901 *
902 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
903 *
904 * Here is the general formula:
905 */
906 xfer->dma_page_ptr = parm->dma_page_ptr;
907 parm->dma_page_ptr += (2 * n_frbuffers);
908 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
909 }
910 #endif
911 if (zmps) {
912 /* correct maximum data length */
913 xfer->max_data_length = 0;
914 }
915 /* subtract USB frame remainder from "hc_max_frame_size" */
916
917 xfer->max_hc_frame_size =
918 (parm->hc_max_frame_size -
919 (parm->hc_max_frame_size % xfer->max_frame_size));
920
921 if (xfer->max_hc_frame_size == 0) {
922 parm->err = USB_ERR_INVAL;
923 goto done;
924 }
925
926 /* initialize frame buffers */
927
928 if (parm->buf) {
929 for (x = 0; x != n_frbuffers; x++) {
930 xfer->frbuffers[x].tag_parent =
931 &xfer->xroot->dma_parent_tag;
932 #if USB_HAVE_BUSDMA
933 if (xfer->flags_int.bdma_enable &&
934 (parm->bufsize_max > 0)) {
935 if (usb_pc_dmamap_create(
936 xfer->frbuffers + x,
937 parm->bufsize_max)) {
938 parm->err = USB_ERR_NOMEM;
939 goto done;
940 }
941 }
942 #endif
943 }
944 }
945 done:
946 if (parm->err) {
947 /*
948 * Set some dummy values so that we avoid division by zero:
949 */
950 xfer->max_hc_frame_size = 1;
951 xfer->max_frame_size = 1;
952 xfer->max_packet_size = 1;
953 xfer->max_data_length = 0;
954 xfer->nframes = 0;
955 xfer->max_frame_count = 0;
956 }
957 }
958
959 static uint8_t
960 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
961 uint16_t n_setup)
962 {
963 while (n_setup--) {
964 uint8_t type = setup_start[n_setup].type;
965 if (type == UE_BULK || type == UE_BULK_INTR ||
966 type == UE_TYPE_ANY)
967 return (1);
968 }
969 return (0);
970 }
971
972 /*------------------------------------------------------------------------*
973 * usbd_transfer_setup - setup an array of USB transfers
974 *
975 * NOTE: You must always call "usbd_transfer_unsetup" after calling
976 * "usbd_transfer_setup" if success was returned.
977 *
978 * The idea is that the USB device driver should pre-allocate all its
979 * transfers by one call to this function.
980 *
981 * Return values:
982 * 0: Success
983 * Else: Failure
984 *------------------------------------------------------------------------*/
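/*
 * Usage sketch for a hypothetical driver "xxx" (illustration only;
 * the softc fields, callbacks and "iface_index" variable are
 * assumptions, not part of any real driver):
 *
 *	static const struct usb_config xxx_config[2] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_OUT,
 *			.bufsize = 512,
 *			.callback = &xxx_write_callback,
 *		},
 *		[1] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.short_xfer_ok = 1,},
 *			.callback = &xxx_read_callback,
 *		},
 *	};
 *
 *	error = usbd_transfer_setup(uaa->device, &iface_index,
 *	    sc->sc_xfer, xxx_config, 2, sc, &sc->sc_mtx);
 */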
985 usb_error_t
986 usbd_transfer_setup(struct usb_device *udev,
987 const uint8_t *ifaces, struct usb_xfer **ppxfer,
988 const struct usb_config *setup_start, uint16_t n_setup,
989 void *priv_sc, struct mtx *xfer_mtx)
990 {
991 const struct usb_config *setup_end = setup_start + n_setup;
992 const struct usb_config *setup;
993 struct usb_setup_params *parm;
994 struct usb_endpoint *ep;
995 struct usb_xfer_root *info;
996 struct usb_xfer *xfer;
997 void *buf = NULL;
998 usb_error_t error = 0;
999 uint16_t n;
1000 uint16_t refcount;
1001 uint8_t do_unlock;
1002
1003 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1004 "usbd_transfer_setup can sleep!");
1005
1006 /* do some checking first */
1007
1008 if (n_setup == 0) {
1009 DPRINTFN(6, "setup array has zero length!\n");
1010 return (USB_ERR_INVAL);
1011 }
1012 if (ifaces == NULL) {
1013 DPRINTFN(6, "ifaces array is NULL!\n");
1014 return (USB_ERR_INVAL);
1015 }
1016 if (xfer_mtx == NULL) {
1017 DPRINTFN(6, "using global lock\n");
1018 xfer_mtx = &Giant;
1019 }
1020
1021 /* more sanity checks */
1022
1023 for (setup = setup_start, n = 0;
1024 setup != setup_end; setup++, n++) {
1025 if (setup->bufsize == (usb_frlength_t)-1) {
1026 error = USB_ERR_BAD_BUFSIZE;
1027 DPRINTF("invalid bufsize\n");
1028 }
1029 if (setup->callback == NULL) {
1030 error = USB_ERR_NO_CALLBACK;
1031 DPRINTF("no callback\n");
1032 }
1033 ppxfer[n] = NULL;
1034 }
1035
1036 if (error)
1037 return (error);
1038
1039 /* Protect scratch area */
1040 do_unlock = usbd_ctrl_lock(udev);
1041
1042 refcount = 0;
1043 info = NULL;
1044
1045 parm = &udev->scratch.xfer_setup[0].parm;
1046 memset(parm, 0, sizeof(*parm));
1047
1048 parm->udev = udev;
1049 parm->speed = usbd_get_speed(udev);
1050 parm->hc_max_packet_count = 1;
1051
1052 if (parm->speed >= USB_SPEED_MAX) {
1053 parm->err = USB_ERR_INVAL;
1054 goto done;
1055 }
1056 /* setup all transfers */
1057
1058 while (1) {
1059 if (buf) {
1060 /*
1061 * Initialize the "usb_xfer_root" structure,
1062 * which is common for all our USB transfers.
1063 */
1064 info = USB_ADD_BYTES(buf, 0);
1065
1066 info->memory_base = buf;
1067 info->memory_size = parm->size[0];
1068
1069 #if USB_HAVE_BUSDMA
1070 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
1071 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
1072 #endif
1073 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
1074 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
1075
1076 cv_init(&info->cv_drain, "WDRAIN");
1077
1078 info->xfer_mtx = xfer_mtx;
1079 #if USB_HAVE_BUSDMA
1080 usb_dma_tag_setup(&info->dma_parent_tag,
1081 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
1082 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
1083 parm->dma_tag_max);
1084 #endif
1085
1086 info->bus = udev->bus;
1087 info->udev = udev;
1088
1089 TAILQ_INIT(&info->done_q.head);
1090 info->done_q.command = &usbd_callback_wrapper;
1091 #if USB_HAVE_BUSDMA
1092 TAILQ_INIT(&info->dma_q.head);
1093 info->dma_q.command = &usb_bdma_work_loop;
1094 #endif
1095 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1096 info->done_m[0].xroot = info;
1097 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1098 info->done_m[1].xroot = info;
1099
1100 /*
1101 * In device side mode control endpoint
1102 * requests need to run from a separate
1103 * context, else there is a chance of
1104 * deadlock!
1105 */
1106 if (setup_start == usb_control_ep_cfg ||
1107 setup_start == usb_control_ep_quirk_cfg)
1108 info->done_p =
1109 USB_BUS_CONTROL_XFER_PROC(udev->bus);
1110 else if (xfer_mtx == &Giant)
1111 info->done_p =
1112 USB_BUS_GIANT_PROC(udev->bus);
1113 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1114 info->done_p =
1115 USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1116 else
1117 info->done_p =
1118 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1119 }
1120 /* reset sizes */
1121
1122 parm->size[0] = 0;
1123 parm->buf = buf;
1124 parm->size[0] += sizeof(info[0]);
1125
1126 for (setup = setup_start, n = 0;
1127 setup != setup_end; setup++, n++) {
1128 /* skip USB transfers without callbacks: */
1129 if (setup->callback == NULL) {
1130 continue;
1131 }
1132 /* see if there is a matching endpoint */
1133 ep = usbd_get_endpoint(udev,
1134 ifaces[setup->if_index], setup);
1135
1136 /*
1137 * Check that the USB PIPE is valid and that
1138 * the endpoint mode is proper.
1139 *
1140 * Make sure we don't allocate a streams
1141 * transfer when such a combination is not
1142 * valid.
1143 */
1144 if ((ep == NULL) || (ep->methods == NULL) ||
1145 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1146 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1147 (setup->stream_id != 0 &&
1148 (setup->stream_id >= USB_MAX_EP_STREAMS ||
1149 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1150 if (setup->flags.no_pipe_ok)
1151 continue;
1152 if ((setup->usb_mode != USB_MODE_DUAL) &&
1153 (setup->usb_mode != udev->flags.usb_mode))
1154 continue;
1155 parm->err = USB_ERR_NO_PIPE;
1156 goto done;
1157 }
1158
1159 /* align data properly */
1160 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1161
1162 /* store current setup pointer */
1163 parm->curr_setup = setup;
1164
1165 if (buf) {
1166 /*
1167 * Common initialization of the
1168 * "usb_xfer" structure.
1169 */
1170 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1171 xfer->address = udev->address;
1172 xfer->priv_sc = priv_sc;
1173 xfer->xroot = info;
1174
1175 usb_callout_init_mtx(&xfer->timeout_handle,
1176 &udev->bus->bus_mtx, 0);
1177 } else {
1178 /*
1179 * Set up a dummy xfer, because we are
1180 * writing to the "usb_xfer"
1181 * structure pointed to by "xfer"
1182 * before we have allocated any
1183 * memory:
1184 */
1185 xfer = &udev->scratch.xfer_setup[0].dummy;
1186 memset(xfer, 0, sizeof(*xfer));
1187 refcount++;
1188 }
1189
1190 /* set transfer endpoint pointer */
1191 xfer->endpoint = ep;
1192
1193 /* set transfer stream ID */
1194 xfer->stream_id = setup->stream_id;
1195
1196 parm->size[0] += sizeof(xfer[0]);
1197 parm->methods = xfer->endpoint->methods;
1198 parm->curr_xfer = xfer;
1199
1200 /*
1201 * Call the Host or Device controller transfer
1202 * setup routine:
1203 */
1204 (udev->bus->methods->xfer_setup) (parm);
1205
1206 /* check for error */
1207 if (parm->err)
1208 goto done;
1209
1210 if (buf) {
1211 /*
1212 * Increment the endpoint refcount. This
1213 * basically prevents setting a new
1214 * configuration and alternate setting
1215 * when USB transfers are in use on
1216 * the given interface. Search the USB
1217 * code for "endpoint->refcount_alloc" if you
1218 * want more information.
1219 */
1220 USB_BUS_LOCK(info->bus);
1221 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1222 parm->err = USB_ERR_INVAL;
1223
1224 xfer->endpoint->refcount_alloc++;
1225
1226 if (xfer->endpoint->refcount_alloc == 0)
1227 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1228 USB_BUS_UNLOCK(info->bus);
1229
1230 /*
1231 * Whenever we set ppxfer[] then we
1232 * also need to increment the
1233 * "setup_refcount":
1234 */
1235 info->setup_refcount++;
1236
1237 /*
1238 * Transfer is successfully setup and
1239 * can be used:
1240 */
1241 ppxfer[n] = xfer;
1242 }
1243
1244 /* check for error */
1245 if (parm->err)
1246 goto done;
1247 }
1248
1249 if (buf != NULL || parm->err != 0)
1250 goto done;
1251
1252 /* if no transfers, nothing to do */
1253 if (refcount == 0)
1254 goto done;
1255
1256 /* align data properly */
1257 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1258
1259 /* store offset temporarily */
1260 parm->size[1] = parm->size[0];
1261
1262 /*
1263 * The number of DMA tags required depends on
1264 * the number of endpoints. The current estimate
1265 * for maximum number of DMA tags per endpoint
1266 * is three:
1267 * 1) for loading memory
1268 * 2) for allocating memory
1269 * 3) for fixing memory [UHCI]
1270 */
1271 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1272
1273 /*
1274 * DMA tags for QH, TD, Data and more.
1275 */
1276 parm->dma_tag_max += 8;
1277
1278 parm->dma_tag_p += parm->dma_tag_max;
1279
1280 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1281 ((uint8_t *)0);
1282
1283 /* align data properly */
1284 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1285
1286 /* store offset temporarily */
1287 parm->size[3] = parm->size[0];
1288
1289 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1290 ((uint8_t *)0);
1291
1292 /* align data properly */
1293 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1294
1295 /* store offset temporarily */
1296 parm->size[4] = parm->size[0];
1297
1298 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1299 ((uint8_t *)0);
1300
1301 /* store end offset temporarily */
1302 parm->size[5] = parm->size[0];
1303
1304 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1305 ((uint8_t *)0);
1306
1307 /* store end offset temporarily */
1308
1309 parm->size[2] = parm->size[0];
1310
1311 /* align data properly */
1312 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1313
1314 parm->size[6] = parm->size[0];
1315
1316 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1317 ((uint8_t *)0);
1318
1319 /* align data properly */
1320 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1321
1322 /* allocate zeroed memory */
1323 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1324 #if (USB_HAVE_MALLOC_WAITOK == 0)
1325 if (buf == NULL) {
1326 parm->err = USB_ERR_NOMEM;
1327 DPRINTFN(0, "cannot allocate memory block for "
1328 "configuration (%d bytes)\n",
1329 parm->size[0]);
1330 goto done;
1331 }
1332 #endif
1333 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1334 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1335 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1336 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1337 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1338 }
1339
1340 done:
1341 if (buf) {
1342 if (info->setup_refcount == 0) {
1343 /*
1344 * "usbd_transfer_unsetup_sub" will unlock
1345 * the bus mutex before returning !
1346 */
1347 USB_BUS_LOCK(info->bus);
1348
1349 /* something went wrong */
1350 usbd_transfer_unsetup_sub(info, 0);
1351 }
1352 }
1353
1354 /* check if any errors happened */
1355 if (parm->err)
1356 usbd_transfer_unsetup(ppxfer, n_setup);
1357
1358 error = parm->err;
1359
1360 if (do_unlock)
1361 usbd_ctrl_unlock(udev);
1362
1363 return (error);
1364 }
1365
1366 /*------------------------------------------------------------------------*
1367 * usbd_transfer_unsetup_sub - factored out code
1368 *------------------------------------------------------------------------*/
1369 static void
1370 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1371 {
1372 #if USB_HAVE_BUSDMA
1373 struct usb_page_cache *pc;
1374 #endif
1375
1376 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1377
1378 /* wait for any outstanding DMA operations */
1379
1380 if (needs_delay) {
1381 usb_timeout_t temp;
1382 temp = usbd_get_dma_delay(info->udev);
1383 if (temp != 0) {
1384 usb_pause_mtx(&info->bus->bus_mtx,
1385 USB_MS_TO_TICKS(temp));
1386 }
1387 }
1388
1389 /* make sure that our done messages are not queued anywhere */
1390 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1391
1392 USB_BUS_UNLOCK(info->bus);
1393
1394 #if USB_HAVE_BUSDMA
1395 /* free DMA'able memory, if any */
1396 pc = info->dma_page_cache_start;
1397 while (pc != info->dma_page_cache_end) {
1398 usb_pc_free_mem(pc);
1399 pc++;
1400 }
1401
1402 /* free DMA maps in all "xfer->frbuffers" */
1403 pc = info->xfer_page_cache_start;
1404 while (pc != info->xfer_page_cache_end) {
1405 usb_pc_dmamap_destroy(pc);
1406 pc++;
1407 }
1408
1409 /* free all DMA tags */
1410 usb_dma_tag_unsetup(&info->dma_parent_tag);
1411 #endif
1412
1413 cv_destroy(&info->cv_drain);
1414
1415 /*
1416 * free the "memory_base" last, hence the "info" structure is
1417 * contained within the "memory_base"!
1418 */
1419 free(info->memory_base, M_USB);
1420 }
1421
1422 /*------------------------------------------------------------------------*
1423 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1424 *
1425 * NOTE: All USB transfers in progress will get called back passing
1426 * the error code "USB_ERR_CANCELLED" before this function
1427 * returns.
1428 *------------------------------------------------------------------------*/
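/*
 * Typical detach-time usage (sketch), matching the hypothetical
 * driver example shown before usbd_transfer_setup():
 *
 *	usbd_transfer_unsetup(sc->sc_xfer, 2);
 */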
1429 void
1430 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1431 {
1432 struct usb_xfer *xfer;
1433 struct usb_xfer_root *info;
1434 uint8_t needs_delay = 0;
1435
1436 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1437 "usbd_transfer_unsetup can sleep!");
1438
1439 while (n_setup--) {
1440 xfer = pxfer[n_setup];
1441
1442 if (xfer == NULL)
1443 continue;
1444
1445 info = xfer->xroot;
1446
1447 USB_XFER_LOCK(xfer);
1448 USB_BUS_LOCK(info->bus);
1449
1450 /*
1451 * HINT: when you start/stop a transfer, it might be a
1452 * good idea to directly use the "pxfer[]" structure:
1453 *
1454 * usbd_transfer_start(sc->pxfer[0]);
1455 * usbd_transfer_stop(sc->pxfer[0]);
1456 *
1457 * That way, if your code has many parts that will not
1458 * stop running under the same lock, in other words
1459 * "xfer_mtx", the usbd_transfer_start and
1460 * usbd_transfer_stop functions will simply return
1461 * when they detect a NULL pointer argument.
1462 *
1463 * To avoid any races we clear the "pxfer[]" pointer
1464 * while holding the private mutex of the driver:
1465 */
1466 pxfer[n_setup] = NULL;
1467
1468 USB_BUS_UNLOCK(info->bus);
1469 USB_XFER_UNLOCK(xfer);
1470
1471 usbd_transfer_drain(xfer);
1472
1473 #if USB_HAVE_BUSDMA
1474 if (xfer->flags_int.bdma_enable)
1475 needs_delay = 1;
1476 #endif
1477 /*
1478 * NOTE: default endpoint does not have an
1479 * interface, even if endpoint->iface_index == 0
1480 */
1481 USB_BUS_LOCK(info->bus);
1482 xfer->endpoint->refcount_alloc--;
1483 USB_BUS_UNLOCK(info->bus);
1484
1485 usb_callout_drain(&xfer->timeout_handle);
1486
1487 USB_BUS_LOCK(info->bus);
1488
1489 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1490 "reference count\n"));
1491
1492 info->setup_refcount--;
1493
1494 if (info->setup_refcount == 0) {
1495 usbd_transfer_unsetup_sub(info,
1496 needs_delay);
1497 } else {
1498 USB_BUS_UNLOCK(info->bus);
1499 }
1500 }
1501 }
1502
1503 /*------------------------------------------------------------------------*
1504 * usbd_control_transfer_init - factored out code
1505 *
1506 * In USB Device Mode we have to wait for the SETUP packet which
1507 * contains the "struct usb_device_request" structure, before we can
1508 * transfer any data. In USB Host Mode we already have the SETUP
1509 * packet at the moment the USB transfer is started. This leads us to
1510 * having to setup the USB transfer at two different places in
1511 * time. This function just contains factored out control transfer
1512 * initialisation code, so that we don't duplicate the code.
1513 *------------------------------------------------------------------------*/
1514 static void
1515 usbd_control_transfer_init(struct usb_xfer *xfer)
1516 {
1517 struct usb_device_request req;
1518
1519 /* copy out the USB request header */
1520
1521 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1522
1523 /* setup remainder */
1524
1525 xfer->flags_int.control_rem = UGETW(req.wLength);
1526
1527 /* copy direction to endpoint variable */
1528
1529 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1530 xfer->endpointno |=
1531 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1532 }
1533
1534 /*------------------------------------------------------------------------*
1535 * usbd_control_transfer_did_data
1536 *
1537 * This function returns non-zero if a control endpoint has
1538 * transferred the first DATA packet after the SETUP packet.
1539 * Else it returns zero.
1540 *------------------------------------------------------------------------*/
1541 static uint8_t
1542 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1543 {
1544 struct usb_device_request req;
1545
1546 /* SETUP packet is not yet sent */
1547 if (xfer->flags_int.control_hdr != 0)
1548 return (0);
1549
1550 /* copy out the USB request header */
1551 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1552
1553 /* compare remainder to the initial value */
1554 return (xfer->flags_int.control_rem != UGETW(req.wLength));
1555 }
1556
1557 /*------------------------------------------------------------------------*
1558 * usbd_setup_ctrl_transfer
1559 *
1560 * This function handles initialisation of control transfers. Control
1561 * transfers are special in that they can both transmit
1562 * and receive data.
1563 *
1564 * Return values:
1565 * 0: Success
1566 * Else: Failure
1567 *------------------------------------------------------------------------*/
1568 static int
1569 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1570 {
1571 usb_frlength_t len;
1572
1573 /* Check for control endpoint stall */
1574 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1575 /* the control transfer is no longer active */
1576 xfer->flags_int.control_stall = 1;
1577 xfer->flags_int.control_act = 0;
1578 } else {
1579 /* don't stall control transfer by default */
1580 xfer->flags_int.control_stall = 0;
1581 }
1582
1583 /* Check for invalid number of frames */
1584 if (xfer->nframes > 2) {
1585 /*
1586 * If you need to split a control transfer, you
1587 * have to do one part at a time. Only with
1588 * non-control transfers can you do multiple
1589 * parts at a time.
1590 */
1591 DPRINTFN(0, "Too many frames: %u\n",
1592 (unsigned)xfer->nframes);
1593 goto error;
1594 }
1595
1596 /*
1597 * Check if there is a control
1598 * transfer in progress:
1599 */
1600 if (xfer->flags_int.control_act) {
1601 if (xfer->flags_int.control_hdr) {
1602 /* clear send header flag */
1603
1604 xfer->flags_int.control_hdr = 0;
1605
1606 /* setup control transfer */
1607 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1608 usbd_control_transfer_init(xfer);
1609 }
1610 }
1611 /* get data length */
1612
1613 len = xfer->sumlen;
1614
1615 } else {
1616 /* the size of the SETUP structure is hardcoded ! */
1617
1618 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1619 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1620 xfer->frlengths[0], sizeof(struct
1621 usb_device_request));
1622 goto error;
1623 }
1624 /* check USB mode */
1625 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1626 /* check number of frames */
1627 if (xfer->nframes != 1) {
1628 /*
1629 * We need to receive the setup
1630 * message first so that we know the
1631 * data direction!
1632 */
1633 DPRINTF("Misconfigured transfer\n");
1634 goto error;
1635 }
1636 /*
1637 * Set a dummy "control_rem" value. This
1638 * variable will be overwritten later by a
1639 * call to "usbd_control_transfer_init()" !
1640 */
1641 xfer->flags_int.control_rem = 0xFFFF;
1642 } else {
1643 /* setup "endpoint" and "control_rem" */
1644
1645 usbd_control_transfer_init(xfer);
1646 }
1647
1648 /* set transfer-header flag */
1649
1650 xfer->flags_int.control_hdr = 1;
1651
1652 /* get data length */
1653
1654 len = (xfer->sumlen - sizeof(struct usb_device_request));
1655 }
1656
1657 /* update did data flag */
1658
1659 xfer->flags_int.control_did_data =
1660 usbd_control_transfer_did_data(xfer);
1661
1662 /* check if there is a length mismatch */
1663
1664 if (len > xfer->flags_int.control_rem) {
1665 DPRINTFN(0, "Length (%d) greater than "
1666 "remaining length (%d)\n", len,
1667 xfer->flags_int.control_rem);
1668 goto error;
1669 }
1670 /* check if we are doing a short transfer */
1671
1672 if (xfer->flags.force_short_xfer) {
1673 xfer->flags_int.control_rem = 0;
1674 } else {
1675 if ((len != xfer->max_data_length) &&
1676 (len != xfer->flags_int.control_rem) &&
1677 (xfer->nframes != 1)) {
1678 DPRINTFN(0, "Short control transfer without "
1679 "force_short_xfer set\n");
1680 goto error;
1681 }
1682 xfer->flags_int.control_rem -= len;
1683 }
1684
1685 /* the status part is executed when "control_act" is 0 */
1686
1687 if ((xfer->flags_int.control_rem > 0) ||
1688 (xfer->flags.manual_status)) {
1689 /* don't execute the STATUS stage yet */
1690 xfer->flags_int.control_act = 1;
1691
1692 /* sanity check */
1693 if ((!xfer->flags_int.control_hdr) &&
1694 (xfer->nframes == 1)) {
1695 /*
1696 * This is not a valid operation!
1697 */
1698 DPRINTFN(0, "Invalid parameter "
1699 "combination\n");
1700 goto error;
1701 }
1702 } else {
1703 /* time to execute the STATUS stage */
1704 xfer->flags_int.control_act = 0;
1705 }
1706 return (0); /* success */
1707
1708 error:
1709 return (1); /* failure */
1710 }
1711
1712 /*------------------------------------------------------------------------*
1713 * usbd_transfer_submit - start USB hardware for the given transfer
1714 *
1715 * This function should only be called from the USB callback.
1716 *------------------------------------------------------------------------*/
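/*
 * Callback sketch (illustration only; "xxx_read_callback" is a
 * hypothetical driver callback): usbd_transfer_submit() is normally
 * called from the USB_ST_SETUP and USB_ST_TRANSFERRED cases of a
 * transfer callback:
 *
 *	static void
 *	xxx_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */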
1717 void
1718 usbd_transfer_submit(struct usb_xfer *xfer)
1719 {
1720 struct usb_xfer_root *info;
1721 struct usb_bus *bus;
1722 usb_frcount_t x;
1723
1724 info = xfer->xroot;
1725 bus = info->bus;
1726
1727 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1728 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1729 "read" : "write");
1730
1731 #ifdef USB_DEBUG
1732 if (USB_DEBUG_VAR > 0) {
1733 USB_BUS_LOCK(bus);
1734
1735 usb_dump_endpoint(xfer->endpoint);
1736
1737 USB_BUS_UNLOCK(bus);
1738 }
1739 #endif
1740
1741 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1742 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1743
1744 /* Only open the USB transfer once! */
1745 if (!xfer->flags_int.open) {
1746 xfer->flags_int.open = 1;
1747
1748 DPRINTF("open\n");
1749
1750 USB_BUS_LOCK(bus);
1751 (xfer->endpoint->methods->open) (xfer);
1752 USB_BUS_UNLOCK(bus);
1753 }
1754 /* set "transferring" flag */
1755 xfer->flags_int.transferring = 1;
1756
1757 #if USB_HAVE_POWERD
1758 /* increment power reference */
1759 usbd_transfer_power_ref(xfer, 1);
1760 #endif
1761 /*
1762 * Check if the transfer is waiting on a queue, most
1763 * frequently the "done_q":
1764 */
1765 if (xfer->wait_queue) {
1766 USB_BUS_LOCK(bus);
1767 usbd_transfer_dequeue(xfer);
1768 USB_BUS_UNLOCK(bus);
1769 }
1770 /* clear "did_dma_delay" flag */
1771 xfer->flags_int.did_dma_delay = 0;
1772
1773 /* clear "did_close" flag */
1774 xfer->flags_int.did_close = 0;
1775
1776 #if USB_HAVE_BUSDMA
1777 /* clear "bdma_setup" flag */
1778 xfer->flags_int.bdma_setup = 0;
1779 #endif
1780 /* by default we cannot cancel any USB transfer immediately */
1781 xfer->flags_int.can_cancel_immed = 0;
1782
1783 /* clear lengths and frame counts by default */
1784 xfer->sumlen = 0;
1785 xfer->actlen = 0;
1786 xfer->aframes = 0;
1787
1788 /* clear any previous errors */
1789 xfer->error = 0;
1790
1791 /* Check if the device is still alive */
1792 if (info->udev->state < USB_STATE_POWERED) {
1793 USB_BUS_LOCK(bus);
1794 /*
1795 * Must return cancelled error code else
1796 * device drivers can hang.
1797 */
1798 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1799 USB_BUS_UNLOCK(bus);
1800 return;
1801 }
1802
1803 /* sanity check */
1804 if (xfer->nframes == 0) {
1805 if (xfer->flags.stall_pipe) {
1806 /*
1807 * Special case - want to stall without transferring
1808 * any data:
1809 */
1810 DPRINTF("xfer=%p nframes=0: stall "
1811 "or clear stall!\n", xfer);
1812 USB_BUS_LOCK(bus);
1813 xfer->flags_int.can_cancel_immed = 1;
1814 /* start the transfer */
1815 usb_command_wrapper(&xfer->endpoint->
1816 endpoint_q[xfer->stream_id], xfer);
1817 USB_BUS_UNLOCK(bus);
1818 return;
1819 }
1820 USB_BUS_LOCK(bus);
1821 usbd_transfer_done(xfer, USB_ERR_INVAL);
1822 USB_BUS_UNLOCK(bus);
1823 return;
1824 }
1825 /* compute some variables */
1826
1827 for (x = 0; x != xfer->nframes; x++) {
1828 /* make a copy of the frlengths[] */
1829 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1830 /* compute total transfer length */
1831 xfer->sumlen += xfer->frlengths[x];
1832 if (xfer->sumlen < xfer->frlengths[x]) {
1833 /* length wrapped around */
1834 USB_BUS_LOCK(bus);
1835 usbd_transfer_done(xfer, USB_ERR_INVAL);
1836 USB_BUS_UNLOCK(bus);
1837 return;
1838 }
1839 }
1840
1841 /* clear some internal flags */
1842
1843 xfer->flags_int.short_xfer_ok = 0;
1844 xfer->flags_int.short_frames_ok = 0;
1845
1846 /* check if this is a control transfer */
1847
1848 if (xfer->flags_int.control_xfr) {
1849 if (usbd_setup_ctrl_transfer(xfer)) {
1850 USB_BUS_LOCK(bus);
1851 usbd_transfer_done(xfer, USB_ERR_STALLED);
1852 USB_BUS_UNLOCK(bus);
1853 return;
1854 }
1855 }
1856 /*
1857 * Setup filtered version of some transfer flags,
1858 * in case of data read direction
1859 */
1860 if (USB_GET_DATA_ISREAD(xfer)) {
1861 if (xfer->flags.short_frames_ok) {
1862 xfer->flags_int.short_xfer_ok = 1;
1863 xfer->flags_int.short_frames_ok = 1;
1864 } else if (xfer->flags.short_xfer_ok) {
1865 xfer->flags_int.short_xfer_ok = 1;
1866
1867 /* check for control transfer */
1868 if (xfer->flags_int.control_xfr) {
1869 /*
1870 * 1) Control transfers do not support
1871 * reception of multiple short USB
1872 * frames in host mode and device side
1873 * mode, with exception of:
1874 *
1875 * 2) Due to sometimes buggy device
1876 * side firmware we need to do a
1877 * STATUS stage in case of short
1878 * control transfers in USB host mode.
1879 * The STATUS stage then becomes the
1880 * "alt_next" to the DATA stage.
1881 */
1882 xfer->flags_int.short_frames_ok = 1;
1883 }
1884 }
1885 }
1886 /*
1887 * Check if BUS-DMA support is enabled and try to load virtual
1888 * buffers into DMA, if any:
1889 */
1890 #if USB_HAVE_BUSDMA
1891 if (xfer->flags_int.bdma_enable) {
1892 /* insert the USB transfer last in the BUS-DMA queue */
1893 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1894 return;
1895 }
1896 #endif
1897 /*
1898 * Enter the USB transfer into the Host Controller or
1899 * Device Controller schedule:
1900 */
1901 usbd_pipe_enter(xfer);
1902 }
1903
1904 /*------------------------------------------------------------------------*
1905 * usbd_pipe_enter - factored out code
1906 *------------------------------------------------------------------------*/
1907 void
1908 usbd_pipe_enter(struct usb_xfer *xfer)
1909 {
1910 struct usb_endpoint *ep;
1911
1912 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1913
1914 USB_BUS_LOCK(xfer->xroot->bus);
1915
1916 ep = xfer->endpoint;
1917
1918 DPRINTF("enter\n");
1919
1920 /* the transfer can now be cancelled */
1921 xfer->flags_int.can_cancel_immed = 1;
1922
1923 /* enter the transfer */
1924 (ep->methods->enter) (xfer);
1925
1926 /* check for transfer error */
1927 if (xfer->error) {
1928 /* some error has happened */
1929 usbd_transfer_done(xfer, 0);
1930 USB_BUS_UNLOCK(xfer->xroot->bus);
1931 return;
1932 }
1933
1934 /* start the transfer */
1935 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1936 USB_BUS_UNLOCK(xfer->xroot->bus);
1937 }
1938
1939 /*------------------------------------------------------------------------*
1940 * usbd_transfer_start - start a USB transfer
1941 *
1942 * NOTE: Calling this function more than one time will only
1943 * result in a single transfer start, until the USB transfer
1944 * completes.
1945 *------------------------------------------------------------------------*/
1946 void
1947 usbd_transfer_start(struct usb_xfer *xfer)
1948 {
1949 if (xfer == NULL) {
1950 /* transfer is gone */
1951 return;
1952 }
1953 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1954
1955 /* mark the USB transfer started */
1956
1957 if (!xfer->flags_int.started) {
1958 /* lock the BUS lock to avoid races updating flags_int */
1959 USB_BUS_LOCK(xfer->xroot->bus);
1960 xfer->flags_int.started = 1;
1961 USB_BUS_UNLOCK(xfer->xroot->bus);
1962 }
1963 /* check if the USB transfer callback is already transferring */
1964
1965 if (xfer->flags_int.transferring) {
1966 return;
1967 }
1968 USB_BUS_LOCK(xfer->xroot->bus);
1969 /* call the USB transfer callback */
1970 usbd_callback_ss_done_defer(xfer);
1971 USB_BUS_UNLOCK(xfer->xroot->bus);
1972 }
1973
1974 /*------------------------------------------------------------------------*
1975 * usbd_transfer_stop - stop a USB transfer
1976 *
1977 * NOTE: Calling this function more than one time will only
1978 * result in a single transfer stop.
1979 * NOTE: When this function returns it is not safe to free nor
1980 * reuse any DMA buffers. See "usbd_transfer_drain()".
1981 *------------------------------------------------------------------------*/
1982 void
1983 usbd_transfer_stop(struct usb_xfer *xfer)
1984 {
1985 struct usb_endpoint *ep;
1986
1987 if (xfer == NULL) {
1988 /* transfer is gone */
1989 return;
1990 }
1991 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1992
1993 /* check if the USB transfer was ever opened */
1994
1995 if (!xfer->flags_int.open) {
1996 if (xfer->flags_int.started) {
1997 /* nothing to do except clearing the "started" flag */
1998 /* lock the BUS lock to avoid races updating flags_int */
1999 USB_BUS_LOCK(xfer->xroot->bus);
2000 xfer->flags_int.started = 0;
2001 USB_BUS_UNLOCK(xfer->xroot->bus);
2002 }
2003 return;
2004 }
2005 /* try to stop the current USB transfer */
2006
2007 USB_BUS_LOCK(xfer->xroot->bus);
2008 /* override any previous error */
2009 xfer->error = USB_ERR_CANCELLED;
2010
2011 /*
2012 * Clear "open" and "started" when both private and USB lock
2013 * is locked so that we don't get a race updating "flags_int"
2014 */
2015 xfer->flags_int.open = 0;
2016 xfer->flags_int.started = 0;
2017
2018 /*
2019 * Check if we can cancel the USB transfer immediately.
2020 */
2021 if (xfer->flags_int.transferring) {
2022 if (xfer->flags_int.can_cancel_immed &&
2023 (!xfer->flags_int.did_close)) {
2024 DPRINTF("close\n");
2025 /*
2026 * The following will lead to an USB_ERR_CANCELLED
2027 * error code being passed to the USB callback.
2028 */
2029 (xfer->endpoint->methods->close) (xfer);
2030 /* only close once */
2031 xfer->flags_int.did_close = 1;
2032 } else {
2033 /* need to wait for the next done callback */
2034 }
2035 } else {
2036 DPRINTF("close\n");
2037
2038 /* close here and now */
2039 (xfer->endpoint->methods->close) (xfer);
2040
2041 /*
2042 * Any additional DMA delay is done by
2043 * "usbd_transfer_unsetup()".
2044 */
2045
2046 /*
2047 * Special case. Check if we need to restart a blocked
2048 * endpoint.
2049 */
2050 ep = xfer->endpoint;
2051
2052 /*
2053 * If the current USB transfer is completing we need
2054 * to start the next one:
2055 */
2056 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2057 usb_command_wrapper(
2058 &ep->endpoint_q[xfer->stream_id], NULL);
2059 }
2060 }
2061
2062 USB_BUS_UNLOCK(xfer->xroot->bus);
2063 }
2064
2065 /*------------------------------------------------------------------------*
2066 * usbd_transfer_pending
2067 *
2068 * This function will check if an USB transfer is pending, which is a
2069 * little bit complicated!
2070 * Return values:
2071 * 0: Not pending
2072 * 1: Pending: The USB transfer will receive a callback in the future.
2073 *------------------------------------------------------------------------*/
2074 uint8_t
2075 usbd_transfer_pending(struct usb_xfer *xfer)
2076 {
2077 struct usb_xfer_root *info;
2078 struct usb_xfer_queue *pq;
2079
2080 if (xfer == NULL) {
2081 /* transfer is gone */
2082 return (0);
2083 }
2084 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2085
2086 if (xfer->flags_int.transferring) {
2087 /* trivial case */
2088 return (1);
2089 }
2090 USB_BUS_LOCK(xfer->xroot->bus);
2091 if (xfer->wait_queue) {
2092 /* we are waiting on a queue somewhere */
2093 USB_BUS_UNLOCK(xfer->xroot->bus);
2094 return (1);
2095 }
2096 info = xfer->xroot;
2097 pq = &info->done_q;
2098
2099 if (pq->curr == xfer) {
2100 /* we are currently scheduled for callback */
2101 USB_BUS_UNLOCK(xfer->xroot->bus);
2102 return (1);
2103 }
2104 /* we are not pending */
2105 USB_BUS_UNLOCK(xfer->xroot->bus);
2106 return (0);
2107 }
2108
2109 /*------------------------------------------------------------------------*
2110 * usbd_transfer_drain
2111 *
2112 * This function will stop the USB transfer and wait for any
2113 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2114 * are loaded into DMA can safely be freed or reused after this
2115 * function has returned.
2116 *------------------------------------------------------------------------*/
2117 void
2118 usbd_transfer_drain(struct usb_xfer *xfer)
2119 {
2120 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2121 "usbd_transfer_drain can sleep!");
2122
2123 if (xfer == NULL) {
2124 /* transfer is gone */
2125 return;
2126 }
2127 if (xfer->xroot->xfer_mtx != &Giant) {
2128 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2129 }
2130 USB_XFER_LOCK(xfer);
2131
2132 usbd_transfer_stop(xfer);
2133
2134 while (usbd_transfer_pending(xfer) ||
2135 xfer->flags_int.doing_callback) {
2136 /*
2137 * The callback is allowed to drop its transfer
2138 * mutex. In that case checking only
2139 * "usbd_transfer_pending()" is not enough to tell if
2140 * the USB transfer is fully drained. We also need to
2141 * check the internal "doing_callback" flag.
2142 */
2143 xfer->flags_int.draining = 1;
2144
2145 /*
2146 * Wait until the current outstanding USB
2147 * transfer is complete !
2148 */
2149 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2150 }
2151 USB_XFER_UNLOCK(xfer);
2152 }
2153
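/*
 * Editorial example, not part of the original source: the usual
 * teardown pattern, assuming a hypothetical softc. The drain call
 * stops the transfer, may sleep, and must be made without holding
 * the private mutex:
 *
 *	// called from detach, "sc->sc_mtx" not held
 *	usbd_transfer_drain(sc->sc_xfer[MY_BULK_RD]);
 *	// DMA buffers used by this transfer may now be freed or reused
 */
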
2154 struct usb_page_cache *
2155 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2156 {
2157 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2158
2159 return (&xfer->frbuffers[frindex]);
2160 }
2161
2162 void *
2163 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2164 {
2165 struct usb_page_search page_info;
2166
2167 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2168
2169 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2170 return (page_info.buffer);
2171 }
2172
2173 /*------------------------------------------------------------------------*
2174 * usbd_xfer_get_fps_shift
2175 *
2176 * The following function is only useful for isochronous transfers. It
2177 * returns how many times the frame execution rate has been shifted
2178 * down.
2179 *
2180 * Return value:
2181 * Success: 0..3
2182 * Failure: 0
2183 *------------------------------------------------------------------------*/
2184 uint8_t
2185 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2186 {
2187 return (xfer->fps_shift);
2188 }
2189
2190 usb_frlength_t
2191 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2192 {
2193 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2194
2195 return (xfer->frlengths[frindex]);
2196 }
2197
2198 /*------------------------------------------------------------------------*
2199 * usbd_xfer_set_frame_data
2200 *
2201 * This function sets the pointer of the buffer that should be
2202 * loaded directly into DMA for the given USB frame. Passing "ptr"
2203 * equal to NULL while the corresponding "frlength" is greater
2204 * than zero gives undefined results!
2205 *------------------------------------------------------------------------*/
2206 void
2207 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2208 void *ptr, usb_frlength_t len)
2209 {
2210 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2211
2212 /* set virtual address to load and length */
2213 xfer->frbuffers[frindex].buffer = ptr;
2214 usbd_xfer_set_frame_len(xfer, frindex, len);
2215 }
2216
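/*
 * Editorial example, not part of the original source: a sketch of
 * pointing frame zero at an externally allocated buffer from the
 * USB_ST_SETUP path of a callback, assuming the transfer was set up
 * with the "ext_buffer" flag and that "sc_rx_buf" is a hypothetical
 * driver buffer:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf,
 *		    usbd_xfer_max_len(xfer));
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */
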
2217 void
2218 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2219 void **ptr, int *len)
2220 {
2221 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2222
2223 if (ptr != NULL)
2224 *ptr = xfer->frbuffers[frindex].buffer;
2225 if (len != NULL)
2226 *len = xfer->frlengths[frindex];
2227 }
2228
2229 /*------------------------------------------------------------------------*
2230 * usbd_xfer_old_frame_length
2231 *
2232 * This function returns the frame length of the given frame at the
2233 * time the transfer was submitted. This function can be used to
2234 * compute the starting data pointer of the next isochronous frame
2235 * when an isochronous transfer has completed.
2236 *------------------------------------------------------------------------*/
2237 usb_frlength_t
2238 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2239 {
2240 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2241
2242 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2243 }
2244
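/*
 * Editorial example, not part of the original source: a sketch of
 * walking the frames of a completed isochronous transfer, where
 * "buf" is a hypothetical pointer to the start of the transfer
 * buffer. The submitted length gives the stride to the next frame,
 * while usbd_xfer_frame_len() gives the bytes actually received:
 *
 *	usbd_xfer_status(xfer, NULL, NULL, NULL, &nframes);
 *	for (x = 0; x != nframes; x++) {
 *		// consume usbd_xfer_frame_len(xfer, x) bytes at "buf"
 *		buf += usbd_xfer_old_frame_length(xfer, x);
 *	}
 */
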
2245 void
2246 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2247 int *nframes)
2248 {
2249 if (actlen != NULL)
2250 *actlen = xfer->actlen;
2251 if (sumlen != NULL)
2252 *sumlen = xfer->sumlen;
2253 if (aframes != NULL)
2254 *aframes = xfer->aframes;
2255 if (nframes != NULL)
2256 *nframes = xfer->nframes;
2257 }
2258
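/*
 * Editorial example, not part of the original source: a sketch of
 * reading the completion status from the USB_ST_TRANSFERRED path of
 * a callback; output pointers that are not needed may simply be NULL:
 *
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *	DPRINTF("received %d bytes\n", actlen);
 */
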
2259 /*------------------------------------------------------------------------*
2260 * usbd_xfer_set_frame_offset
2261 *
2262 * This function sets the frame data buffer offset relative to the beginning
2263 * of the USB DMA buffer allocated for this USB transfer.
2264 *------------------------------------------------------------------------*/
2265 void
2266 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2267 usb_frcount_t frindex)
2268 {
2269 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2270 "when the USB buffer is external\n"));
2271 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2272
2273 /* set virtual address to load */
2274 xfer->frbuffers[frindex].buffer =
2275 USB_ADD_BYTES(xfer->local_buffer, offset);
2276 }
2277
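/*
 * Editorial example, not part of the original source: one possible
 * control-transfer layout given the semantics above, where the data
 * stage is placed right after the request structure inside the
 * internal buffer; "req" and "len" are hypothetical locals:
 *
 *	usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
 *	usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
 *	// let the data stage start right after the request
 *	usbd_xfer_set_frame_offset(xfer, sizeof(req), 1);
 *	usbd_xfer_set_frame_len(xfer, 1, len);
 *	usbd_xfer_set_frames(xfer, 2);
 */
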
2278 void
2279 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2280 {
2281 xfer->interval = i;
2282 }
2283
2284 void
2285 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2286 {
2287 xfer->timeout = t;
2288 }
2289
2290 void
2291 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2292 {
2293 xfer->nframes = n;
2294 }
2295
2296 usb_frcount_t
2297 usbd_xfer_max_frames(struct usb_xfer *xfer)
2298 {
2299 return (xfer->max_frame_count);
2300 }
2301
2302 usb_frlength_t
2303 usbd_xfer_max_len(struct usb_xfer *xfer)
2304 {
2305 return (xfer->max_data_length);
2306 }
2307
2308 usb_frlength_t
2309 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2310 {
2311 return (xfer->max_frame_size);
2312 }
2313
2314 void
2315 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2316 usb_frlength_t len)
2317 {
2318 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2319
2320 xfer->frlengths[frindex] = len;
2321 }
2322
2323 /*------------------------------------------------------------------------*
2324 * usb_callback_proc - factored out code
2325 *
2326 * This function performs USB callbacks.
2327 *------------------------------------------------------------------------*/
2328 static void
2329 usb_callback_proc(struct usb_proc_msg *_pm)
2330 {
2331 struct usb_done_msg *pm = (void *)_pm;
2332 struct usb_xfer_root *info = pm->xroot;
2333
2334 /* Change locking order */
2335 USB_BUS_UNLOCK(info->bus);
2336
2337 /*
2338 * We exploit the fact that the mutex is the same for all
2339 * callbacks that will be called from this thread:
2340 */
2341 USB_MTX_LOCK(info->xfer_mtx);
2342 USB_BUS_LOCK(info->bus);
2343
2344 /* Continue where we lost track */
2345 usb_command_wrapper(&info->done_q,
2346 info->done_q.curr);
2347
2348 USB_MTX_UNLOCK(info->xfer_mtx);
2349 }
2350
2351 /*------------------------------------------------------------------------*
2352 * usbd_callback_ss_done_defer
2353 *
2354 * This function will defer the start, stop and done callback to the
2355 * correct thread.
2356 *------------------------------------------------------------------------*/
2357 static void
2358 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2359 {
2360 struct usb_xfer_root *info = xfer->xroot;
2361 struct usb_xfer_queue *pq = &info->done_q;
2362
2363 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2364
2365 if (pq->curr != xfer) {
2366 usbd_transfer_enqueue(pq, xfer);
2367 }
2368 if (!pq->recurse_1) {
2369 /*
2370 * We have to postpone the callback due to the fact we
2371 * will have a Lock Order Reversal, LOR, if we try to
2372 * proceed !
2373 */
2374 (void) usb_proc_msignal(info->done_p,
2375 &info->done_m[0], &info->done_m[1]);
2376 } else {
2377 /* clear second recurse flag */
2378 pq->recurse_2 = 0;
2379 }
2380 return;
2381
2382 }
2383
2384 /*------------------------------------------------------------------------*
2385 * usbd_callback_wrapper
2386 *
2387 * This is a wrapper for USB callbacks. This wrapper does some
2388 * auto-magic things like figuring out if we can call the callback
2389 * directly from the current context or if we need to wake up the
2390 * interrupt process.
2391 *------------------------------------------------------------------------*/
2392 static void
2393 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2394 {
2395 struct usb_xfer *xfer = pq->curr;
2396 struct usb_xfer_root *info = xfer->xroot;
2397
2398 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2399 if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2400 USB_IN_POLLING_MODE_FUNC() == 0) {
2401 /*
2402 * Cases that end up here:
2403 *
2404 * 5) HW interrupt done callback or other source.
2405 * 6) HW completed transfer during callback
2406 */
2407 DPRINTFN(3, "case 5 and 6\n");
2408
2409 /*
2410 * We have to postpone the callback due to the fact we
2411 * will have a Lock Order Reversal, LOR, if we try to
2412 * proceed!
2413 *
2414 * Postponing the callback also ensures that other USB
2415 * transfer queues get a chance.
2416 */
2417 (void) usb_proc_msignal(info->done_p,
2418 &info->done_m[0], &info->done_m[1]);
2419 return;
2420 }
2421 /*
2422 * Cases that end up here:
2423 *
2424 * 1) We are starting a transfer
2425 * 2) We are prematurely calling back a transfer
2426 * 3) We are stopping a transfer
2427 * 4) We are doing an ordinary callback
2428 */
2429 DPRINTFN(3, "case 1-4\n");
2430 /* get next USB transfer in the queue */
2431 info->done_q.curr = NULL;
2432
2433 /* set flag in case of drain */
2434 xfer->flags_int.doing_callback = 1;
2435
2436 USB_BUS_UNLOCK(info->bus);
2437 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2438
2439 /* set correct USB state for callback */
2440 if (!xfer->flags_int.transferring) {
2441 xfer->usb_state = USB_ST_SETUP;
2442 if (!xfer->flags_int.started) {
2443 /* we got stopped before we even got started */
2444 USB_BUS_LOCK(info->bus);
2445 goto done;
2446 }
2447 } else {
2448 if (usbd_callback_wrapper_sub(xfer)) {
2449 /* the callback has been deferred */
2450 USB_BUS_LOCK(info->bus);
2451 goto done;
2452 }
2453 #if USB_HAVE_POWERD
2454 /* decrement power reference */
2455 usbd_transfer_power_ref(xfer, -1);
2456 #endif
2457 xfer->flags_int.transferring = 0;
2458
2459 if (xfer->error) {
2460 xfer->usb_state = USB_ST_ERROR;
2461 } else {
2462 /* set transferred state */
2463 xfer->usb_state = USB_ST_TRANSFERRED;
2464 #if USB_HAVE_BUSDMA
2465 /* sync DMA memory, if any */
2466 if (xfer->flags_int.bdma_enable &&
2467 (!xfer->flags_int.bdma_no_post_sync)) {
2468 usb_bdma_post_sync(xfer);
2469 }
2470 #endif
2471 }
2472 }
2473
2474 #if USB_HAVE_PF
2475 if (xfer->usb_state != USB_ST_SETUP) {
2476 USB_BUS_LOCK(info->bus);
2477 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2478 USB_BUS_UNLOCK(info->bus);
2479 }
2480 #endif
2481 /* call processing routine */
2482 (xfer->callback) (xfer, xfer->error);
2483
2484 /* pickup the USB mutex again */
2485 USB_BUS_LOCK(info->bus);
2486
2487 /*
2488 * Check if we got started after we got cancelled, but
2489 * before we managed to do the callback.
2490 */
2491 if ((!xfer->flags_int.open) &&
2492 (xfer->flags_int.started) &&
2493 (xfer->usb_state == USB_ST_ERROR)) {
2494 /* clear flag in case of drain */
2495 xfer->flags_int.doing_callback = 0;
2496 /* try to loop, but not recursively */
2497 usb_command_wrapper(&info->done_q, xfer);
2498 return;
2499 }
2500
2501 done:
2502 /* clear flag in case of drain */
2503 xfer->flags_int.doing_callback = 0;
2504
2505 /*
2506 * Check if we are draining.
2507 */
2508 if (xfer->flags_int.draining &&
2509 (!xfer->flags_int.transferring)) {
2510 /* "usbd_transfer_drain()" is waiting for end of transfer */
2511 xfer->flags_int.draining = 0;
2512 cv_broadcast(&info->cv_drain);
2513 }
2514
2515 /* do the next callback, if any */
2516 usb_command_wrapper(&info->done_q,
2517 info->done_q.curr);
2518 }
2519
2520 /*------------------------------------------------------------------------*
2521 * usb_dma_delay_done_cb
2522 *
2523 * This function is called when the DMA delay has been executed, and
2524 * will make sure that the callback is called to complete the USB
2525 * transfer. This code path is usually only used when there is an USB
2526 * error like USB_ERR_CANCELLED.
2527 *------------------------------------------------------------------------*/
2528 void
2529 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2530 {
2531 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2532
2533 DPRINTFN(3, "Completed %p\n", xfer);
2534
2535 /* queue callback for execution, again */
2536 usbd_transfer_done(xfer, 0);
2537 }
2538
2539 /*------------------------------------------------------------------------*
2540 * usbd_transfer_dequeue
2541 *
2542 * - This function is used to remove an USB transfer from a USB
2543 * transfer queue.
2544 *
2545 * - This function can be called multiple times in a row.
2546 *------------------------------------------------------------------------*/
2547 void
2548 usbd_transfer_dequeue(struct usb_xfer *xfer)
2549 {
2550 struct usb_xfer_queue *pq;
2551
2552 pq = xfer->wait_queue;
2553 if (pq) {
2554 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2555 xfer->wait_queue = NULL;
2556 }
2557 }
2558
2559 /*------------------------------------------------------------------------*
2560 * usbd_transfer_enqueue
2561 *
2562 * - This function is used to insert an USB transfer into a USB
2563 * transfer queue.
2564 *
2565 * - This function can be called multiple times in a row.
2566 *------------------------------------------------------------------------*/
2567 void
2568 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2569 {
2570 /*
2571 * Insert the USB transfer into the queue, if it is not
2572 * already on a USB transfer queue:
2573 */
2574 if (xfer->wait_queue == NULL) {
2575 xfer->wait_queue = pq;
2576 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2577 }
2578 }
2579
2580 /*------------------------------------------------------------------------*
2581 * usbd_transfer_done
2582 *
2583 * - This function is used to remove an USB transfer from the busdma,
2584 * pipe or interrupt queue.
2585 *
2586 * - This function is used to queue the USB transfer on the done
2587 * queue.
2588 *
2589 * - This function is used to stop any USB transfer timeouts.
2590 *------------------------------------------------------------------------*/
2591 void
2592 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2593 {
2594 struct usb_xfer_root *info = xfer->xroot;
2595
2596 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2597
2598 DPRINTF("err=%s\n", usbd_errstr(error));
2599
2600 /*
2601 * If we are not transferring then just return.
2602 * This can happen during transfer cancel.
2603 */
2604 if (!xfer->flags_int.transferring) {
2605 DPRINTF("not transferring\n");
2606 /* end of control transfer, if any */
2607 xfer->flags_int.control_act = 0;
2608 return;
2609 }
2610 /* only set transfer error, if not already set */
2611 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2612 xfer->error = error;
2613
2614 /* stop any callouts */
2615 usb_callout_stop(&xfer->timeout_handle);
2616
2617 /*
2618 * If we are waiting on a queue, just remove the USB transfer
2619 * from the queue, if any. We should have the required locks
2620 * locked to do the remove when this function is called.
2621 */
2622 usbd_transfer_dequeue(xfer);
2623
2624 #if USB_HAVE_BUSDMA
2625 if (mtx_owned(info->xfer_mtx)) {
2626 struct usb_xfer_queue *pq;
2627
2628 /*
2629 * If the private USB lock is not locked, then we assume
2630 * that the BUS-DMA load stage has been passed:
2631 */
2632 pq = &info->dma_q;
2633
2634 if (pq->curr == xfer) {
2635 /* start the next BUS-DMA load, if any */
2636 usb_command_wrapper(pq, NULL);
2637 }
2638 }
2639 #endif
2640 /* keep some statistics */
2641 if (xfer->error == USB_ERR_CANCELLED) {
2642 info->udev->stats_cancelled.uds_requests
2643 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2644 } else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2645 info->udev->stats_err.uds_requests
2646 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2647 } else {
2648 info->udev->stats_ok.uds_requests
2649 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2650 }
2651
2652 /* call the USB transfer callback */
2653 usbd_callback_ss_done_defer(xfer);
2654 }
2655
2656 /*------------------------------------------------------------------------*
2657 * usbd_transfer_start_cb
2658 *
2659 * This function is called to start the USB transfer when
2660 * "xfer->interval" is greater than zero, and and the endpoint type is
2661 * BULK or CONTROL.
2662 *------------------------------------------------------------------------*/
2663 static void
2664 usbd_transfer_start_cb(void *arg)
2665 {
2666 struct usb_xfer *xfer = arg;
2667 struct usb_endpoint *ep = xfer->endpoint;
2668
2669 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2670
2671 DPRINTF("start\n");
2672
2673 #if USB_HAVE_PF
2674 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2675 #endif
2676
2677 /* the transfer can now be cancelled */
2678 xfer->flags_int.can_cancel_immed = 1;
2679
2680 /* start USB transfer, if no error */
2681 if (xfer->error == 0)
2682 (ep->methods->start) (xfer);
2683
2684 /* check for transfer error */
2685 if (xfer->error) {
2686 /* some error has happened */
2687 usbd_transfer_done(xfer, 0);
2688 }
2689 }
2690
2691 /*------------------------------------------------------------------------*
2692 * usbd_xfer_set_zlp
2693 *
2694 * This function sets the USB transfer's ZLP flag.
2695 *------------------------------------------------------------------------*/
2696 void
2697 usbd_xfer_set_zlp(struct usb_xfer *xfer)
2698 {
2699 if (xfer == NULL) {
2700 /* tearing down */
2701 return;
2702 }
2703 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2704
2705 /* avoid any races by locking the USB mutex */
2706 USB_BUS_LOCK(xfer->xroot->bus);
2707 xfer->flags.send_zlp = 1;
2708 USB_BUS_UNLOCK(xfer->xroot->bus);
2709 }
2710
2711 /*------------------------------------------------------------------------*
2712 * usbd_xfer_get_and_clr_zlp
2713 *
2714 * This function gets and clears the USB transfer's ZLP flag and
2715 * queues a zero-length USB transfer if the flag was set.
2716 *------------------------------------------------------------------------*/
2717 uint8_t
2718 usbd_xfer_get_and_clr_zlp(struct usb_xfer *xfer)
2719 {
2720 uint8_t retval;
2721
2722 if (xfer == NULL) {
2723 /* tearing down */
2724 return (0);
2725 }
2726 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2727
2728 retval = xfer->flags.send_zlp;
2729
2730 if (retval != 0) {
2731 DPRINTFN(1, "Sending zero-length packet.\n");
2732
2733 /* avoid any races by locking the USB mutex */
2734 USB_BUS_LOCK(xfer->xroot->bus);
2735 xfer->flags.send_zlp = 0;
2736 USB_BUS_UNLOCK(xfer->xroot->bus);
2737
2738 /* queue up a zero-length packet */
2739 usbd_xfer_set_frame_len(xfer, 0, 0);
2740 usbd_xfer_set_frames(xfer, 1);
2741 usbd_transfer_submit(xfer);
2742 }
2743 return (retval);
2744 }
2745
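/*
 * Editorial example, not part of the original source: a rough sketch
 * of how the two ZLP helpers above can cooperate in a device side
 * bulk-IN callback; "len" and "max_packet_size" are hypothetical:
 *
 *	case USB_ST_TRANSFERRED:
 *	case USB_ST_SETUP:
 *		if (usbd_xfer_get_and_clr_zlp(xfer))
 *			break;	// a zero-length frame was just queued
 *		// ... fill frame 0 with "len" bytes ...
 *		if ((len % max_packet_size) == 0)
 *			usbd_xfer_set_zlp(xfer);
 *		usbd_xfer_set_frame_len(xfer, 0, len);
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */
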
2746 /*------------------------------------------------------------------------*
2747 * usbd_xfer_set_stall
2748 *
2749 * This function is used to set the stall flag outside the
2750 * callback. This function is NULL safe.
2751 *------------------------------------------------------------------------*/
2752 void
2753 usbd_xfer_set_stall(struct usb_xfer *xfer)
2754 {
2755 if (xfer == NULL) {
2756 /* tearing down */
2757 return;
2758 }
2759 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2760
2761 /* avoid any races by locking the USB mutex */
2762 USB_BUS_LOCK(xfer->xroot->bus);
2763 xfer->flags.stall_pipe = 1;
2764 USB_BUS_UNLOCK(xfer->xroot->bus);
2765 }
2766
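/*
 * Editorial example, not part of the original source: the common
 * host side error-recovery idiom in a transfer callback, which asks
 * the stack to issue a clear-stall before the transfer is retried
 * ("tr_setup" is the callback's hypothetical setup label):
 *
 *	default:			// Error
 *		if (error != USB_ERR_CANCELLED) {
 *			// try to clear stall first
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */
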
2767 int
2768 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2769 {
2770 return (xfer->endpoint->is_stalled);
2771 }
2772
2773 /*------------------------------------------------------------------------*
2774 * usbd_transfer_clear_stall
2775 *
2776 * This function is used to clear the stall flag outside the
2777 * callback. This function is NULL safe.
2778 *------------------------------------------------------------------------*/
2779 void
2780 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2781 {
2782 if (xfer == NULL) {
2783 /* tearing down */
2784 return;
2785 }
2786 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2787
2788 /* avoid any races by locking the USB mutex */
2789 USB_BUS_LOCK(xfer->xroot->bus);
2790 xfer->flags.stall_pipe = 0;
2791 USB_BUS_UNLOCK(xfer->xroot->bus);
2792 }
2793
2794 /*------------------------------------------------------------------------*
2795 * usbd_pipe_start
2796 *
2797 * This function is used to add an USB transfer to the pipe transfer list.
2798 *------------------------------------------------------------------------*/
2799 void
2800 usbd_pipe_start(struct usb_xfer_queue *pq)
2801 {
2802 struct usb_endpoint *ep;
2803 struct usb_xfer *xfer;
2804 uint8_t type;
2805
2806 xfer = pq->curr;
2807 ep = xfer->endpoint;
2808
2809 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2810
2811 /*
2812 * If the endpoint is already stalled we do nothing !
2813 */
2814 if (ep->is_stalled) {
2815 return;
2816 }
2817 /*
2818 * Check if we are supposed to stall the endpoint:
2819 */
2820 if (xfer->flags.stall_pipe) {
2821 struct usb_device *udev;
2822 struct usb_xfer_root *info;
2823
2824 /* clear stall command */
2825 xfer->flags.stall_pipe = 0;
2826
2827 /* get pointer to USB device */
2828 info = xfer->xroot;
2829 udev = info->udev;
2830
2831 /*
2832 * Only stall BULK and INTERRUPT endpoints.
2833 */
2834 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2835 if ((type == UE_BULK) ||
2836 (type == UE_INTERRUPT)) {
2837 uint8_t did_stall;
2838
2839 did_stall = 1;
2840
2841 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2842 (udev->bus->methods->set_stall) (
2843 udev, ep, &did_stall);
2844 } else if (udev->ctrl_xfer[1]) {
2845 info = udev->ctrl_xfer[1]->xroot;
2846 usb_proc_msignal(
2847 USB_BUS_CS_PROC(info->bus),
2848 &udev->cs_msg[0], &udev->cs_msg[1]);
2849 } else {
2850 /* should not happen */
2851 DPRINTFN(0, "No stall handler\n");
2852 }
2853 /*
2854 * Check if we should stall. Some USB hardware
2855 * handles set- and clear-stall in hardware.
2856 */
2857 if (did_stall) {
2858 /*
2859 * The transfer will be continued when
2860 * the clear-stall control endpoint
2861 * message is received.
2862 */
2863 ep->is_stalled = 1;
2864 return;
2865 }
2866 } else if (type == UE_ISOCHRONOUS) {
2867 /*
2868 * Make sure any FIFO overflow or other FIFO
2869 * error conditions go away by resetting the
2870 * endpoint FIFO through the clear stall
2871 * method.
2872 */
2873 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2874 (udev->bus->methods->clear_stall) (udev, ep);
2875 }
2876 }
2877 }
2878 /* Set or clear stall complete - special case */
2879 if (xfer->nframes == 0) {
2880 /* we are complete */
2881 xfer->aframes = 0;
2882 usbd_transfer_done(xfer, 0);
2883 return;
2884 }
2885 /*
2886 * Handled cases:
2887 *
2888 * 1) Start the first transfer queued.
2889 *
2890 * 2) Re-start the current USB transfer.
2891 */
2892 /*
2893 * Check if there should be any
2894 * pre transfer start delay:
2895 */
2896 if (xfer->interval > 0) {
2897 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2898 if ((type == UE_BULK) ||
2899 (type == UE_CONTROL)) {
2900 usbd_transfer_timeout_ms(xfer,
2901 &usbd_transfer_start_cb,
2902 xfer->interval);
2903 return;
2904 }
2905 }
2906 DPRINTF("start\n");
2907
2908 #if USB_HAVE_PF
2909 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2910 #endif
2911 /* the transfer can now be cancelled */
2912 xfer->flags_int.can_cancel_immed = 1;
2913
2914 /* start USB transfer, if no error */
2915 if (xfer->error == 0)
2916 (ep->methods->start) (xfer);
2917
2918 /* check for transfer error */
2919 if (xfer->error) {
2920 /* some error has happened */
2921 usbd_transfer_done(xfer, 0);
2922 }
2923 }
2924
2925 /*------------------------------------------------------------------------*
2926 * usbd_transfer_timeout_ms
2927 *
2928 * This function is used to setup a timeout on the given USB
2929 * transfer. If the timeout has been deferred the callback given by
2930 * "cb" will get called after "ms" milliseconds.
2931 *------------------------------------------------------------------------*/
2932 void
2933 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2934 void (*cb) (void *arg), usb_timeout_t ms)
2935 {
2936 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2937
2938 /* defer delay */
2939 usb_callout_reset(&xfer->timeout_handle,
2940 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2941 }
2942
2943 /*------------------------------------------------------------------------*
2944 * usbd_callback_wrapper_sub
2945 *
2946 * - This function will update variables in an USB transfer after
2947 * the USB transfer is complete.
2948 *
2949 * - This function is used to start the next USB transfer on the
2950 * ep transfer queue, if any.
2951 *
2952 * NOTE: In some special cases the USB transfer will not be removed from
2953 * the pipe queue, but remain first. To enforce USB transfer removal call
2954 * this function passing the error code "USB_ERR_CANCELLED".
2955 *
2956 * Return values:
2957 * 0: Success.
2958 * Else: The callback has been deferred.
2959 *------------------------------------------------------------------------*/
2960 static uint8_t
2961 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2962 {
2963 struct usb_endpoint *ep;
2964 struct usb_bus *bus;
2965 usb_frcount_t x;
2966
2967 bus = xfer->xroot->bus;
2968
2969 if ((!xfer->flags_int.open) &&
2970 (!xfer->flags_int.did_close)) {
2971 DPRINTF("close\n");
2972 USB_BUS_LOCK(bus);
2973 (xfer->endpoint->methods->close) (xfer);
2974 USB_BUS_UNLOCK(bus);
2975 /* only close once */
2976 xfer->flags_int.did_close = 1;
2977 return (1); /* wait for new callback */
2978 }
2979 /*
2980 * If we have a non-hardware induced error we
2981 * need to do the DMA delay!
2982 */
2983 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2984 (xfer->error == USB_ERR_CANCELLED ||
2985 xfer->error == USB_ERR_TIMEOUT ||
2986 bus->methods->start_dma_delay != NULL)) {
2987 usb_timeout_t temp;
2988
2989 /* only delay once */
2990 xfer->flags_int.did_dma_delay = 1;
2991
2992 /* we can not cancel this delay */
2993 xfer->flags_int.can_cancel_immed = 0;
2994
2995 temp = usbd_get_dma_delay(xfer->xroot->udev);
2996
2997 DPRINTFN(3, "DMA delay, %u ms, "
2998 "on %p\n", temp, xfer);
2999
3000 if (temp != 0) {
3001 USB_BUS_LOCK(bus);
3002 /*
3003 * Some hardware solutions have dedicated
3004 * events when it is safe to free DMA'ed
3005 * memory. For the other hardware platforms we
3006 * use a static delay.
3007 */
3008 if (bus->methods->start_dma_delay != NULL) {
3009 (bus->methods->start_dma_delay) (xfer);
3010 } else {
3011 usbd_transfer_timeout_ms(xfer,
3012 (void (*)(void *))&usb_dma_delay_done_cb,
3013 temp);
3014 }
3015 USB_BUS_UNLOCK(bus);
3016 return (1); /* wait for new callback */
3017 }
3018 }
3019 /* check actual number of frames */
3020 if (xfer->aframes > xfer->nframes) {
3021 if (xfer->error == 0) {
3022 panic("%s: actual number of frames, %d, is "
3023 "greater than initial number of frames, %d\n",
3024 __FUNCTION__, xfer->aframes, xfer->nframes);
3025 } else {
3026 /* just set some valid value */
3027 xfer->aframes = xfer->nframes;
3028 }
3029 }
3030 /* compute actual length */
3031 xfer->actlen = 0;
3032
3033 for (x = 0; x != xfer->aframes; x++) {
3034 xfer->actlen += xfer->frlengths[x];
3035 }
3036
3037 /*
3038 * Frames that were not transferred get zero actual length in
3039 * case the USB device driver does not check the actual number
3040 * of frames transferred, "xfer->aframes":
3041 */
3042 for (; x < xfer->nframes; x++) {
3043 usbd_xfer_set_frame_len(xfer, x, 0);
3044 }
3045
3046 /* check actual length */
3047 if (xfer->actlen > xfer->sumlen) {
3048 if (xfer->error == 0) {
3049 panic("%s: actual length, %d, is greater than "
3050 "initial length, %d\n",
3051 __FUNCTION__, xfer->actlen, xfer->sumlen);
3052 } else {
3053 /* just set some valid value */
3054 xfer->actlen = xfer->sumlen;
3055 }
3056 }
3057 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
3058 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
3059 xfer->aframes, xfer->nframes);
3060
3061 if (xfer->error) {
3062 /* end of control transfer, if any */
3063 xfer->flags_int.control_act = 0;
3064
3065 #if USB_HAVE_TT_SUPPORT
3066 switch (xfer->error) {
3067 case USB_ERR_NORMAL_COMPLETION:
3068 case USB_ERR_SHORT_XFER:
3069 case USB_ERR_STALLED:
3070 case USB_ERR_CANCELLED:
3071 /* nothing to do */
3072 break;
3073 default:
3074 /* try to reset the TT, if any */
3075 USB_BUS_LOCK(bus);
3076 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
3077 USB_BUS_UNLOCK(bus);
3078 break;
3079 }
3080 #endif
3081 /* check if we should block the execution queue */
3082 if ((xfer->error != USB_ERR_CANCELLED) &&
3083 (xfer->flags.pipe_bof)) {
3084 DPRINTFN(2, "xfer=%p: Block On Failure "
3085 "on endpoint=%p\n", xfer, xfer->endpoint);
3086 goto done;
3087 }
3088 } else {
3089 /* check for short transfers */
3090 if (xfer->actlen < xfer->sumlen) {
3091 /* end of control transfer, if any */
3092 xfer->flags_int.control_act = 0;
3093
3094 if (!xfer->flags_int.short_xfer_ok) {
3095 xfer->error = USB_ERR_SHORT_XFER;
3096 if (xfer->flags.pipe_bof) {
3097 DPRINTFN(2, "xfer=%p: Block On Failure on "
3098 "Short Transfer on endpoint %p.\n",
3099 xfer, xfer->endpoint);
3100 goto done;
3101 }
3102 }
3103 } else {
3104 /*
3105 * Check if we are in the middle of a
3106 * control transfer:
3107 */
3108 if (xfer->flags_int.control_act) {
3109 DPRINTFN(5, "xfer=%p: Control transfer "
3110 "active on endpoint=%p\n", xfer, xfer->endpoint);
3111 goto done;
3112 }
3113 }
3114 }
3115
3116 ep = xfer->endpoint;
3117
3118 /*
3119 * If the current USB transfer is completing we need to start the
3120 * next one:
3121 */
3122 USB_BUS_LOCK(bus);
3123 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
3124 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
3125
3126 if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
3127 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3128 /* there is another USB transfer waiting */
3129 } else {
3130 /* this is the last USB transfer */
3131 /* clear isochronous sync flag */
3132 xfer->endpoint->is_synced = 0;
3133 }
3134 }
3135 USB_BUS_UNLOCK(bus);
3136 done:
3137 return (0);
3138 }
3139
3140 /*------------------------------------------------------------------------*
3141 * usb_command_wrapper
3142 *
3143 * This function is used to execute commands non-recursively on an USB
3144 * transfer.
3145 *------------------------------------------------------------------------*/
3146 void
3147 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3148 {
3149 if (xfer) {
3150 /*
3151 * If the transfer is not already processing,
3152 * queue it!
3153 */
3154 if (pq->curr != xfer) {
3155 usbd_transfer_enqueue(pq, xfer);
3156 if (pq->curr != NULL) {
3157 /* something is already processing */
3158 DPRINTFN(6, "busy %p\n", pq->curr);
3159 return;
3160 }
3161 }
3162 } else {
3163 /* Get next element in queue */
3164 pq->curr = NULL;
3165 }
3166
3167 if (!pq->recurse_1) {
3168 /* clear third recurse flag */
3169 pq->recurse_3 = 0;
3170
3171 do {
3172 /* set two first recurse flags */
3173 pq->recurse_1 = 1;
3174 pq->recurse_2 = 1;
3175
3176 if (pq->curr == NULL) {
3177 xfer = TAILQ_FIRST(&pq->head);
3178 if (xfer) {
3179 TAILQ_REMOVE(&pq->head, xfer,
3180 wait_entry);
3181 xfer->wait_queue = NULL;
3182 pq->curr = xfer;
3183 } else {
3184 break;
3185 }
3186 }
3187 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3188 (pq->command) (pq);
3189 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3190
3191 /*
3192 * Set third recurse flag to indicate
3193 * recursion happened:
3194 */
3195 pq->recurse_3 = 1;
3196
3197 } while (!pq->recurse_2);
3198
3199 /* clear first recurse flag */
3200 pq->recurse_1 = 0;
3201
3202 } else {
3203 /* clear second recurse flag */
3204 pq->recurse_2 = 0;
3205 }
3206 }
3207
3208 /*------------------------------------------------------------------------*
3209 * usbd_ctrl_transfer_setup
3210 *
3211 * This function is used to setup the default USB control endpoint
3212 * transfer.
3213 *------------------------------------------------------------------------*/
3214 void
3215 usbd_ctrl_transfer_setup(struct usb_device *udev)
3216 {
3217 struct usb_xfer *xfer;
3218 uint8_t no_resetup;
3219 uint8_t iface_index;
3220
3221 /* check for root HUB */
3222 if (udev->parent_hub == NULL)
3223 return;
3224 repeat:
3225
3226 xfer = udev->ctrl_xfer[0];
3227 if (xfer) {
3228 USB_XFER_LOCK(xfer);
3229 no_resetup =
3230 ((xfer->address == udev->address) &&
3231 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3232 udev->ddesc.bMaxPacketSize));
3233 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3234 if (no_resetup) {
3235 /*
3236 * NOTE: checking "xfer->address" and
3237 * starting the USB transfer must be
3238 * atomic!
3239 */
3240 usbd_transfer_start(xfer);
3241 }
3242 }
3243 USB_XFER_UNLOCK(xfer);
3244 } else {
3245 no_resetup = 0;
3246 }
3247
3248 if (no_resetup) {
3249 /*
3250 * All parameters are exactly the same as before.
3251 * Just return.
3252 */
3253 return;
3254 }
3255 /*
3256 * Update wMaxPacketSize for the default control endpoint:
3257 */
3258 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3259 udev->ddesc.bMaxPacketSize;
3260
3261 /*
3262 * Unsetup any existing USB transfer:
3263 */
3264 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3265
3266 /*
3267 * Reset clear stall error counter.
3268 */
3269 udev->clear_stall_errors = 0;
3270
3271 /*
3272 * Try to setup a new USB transfer for the
3273 * default control endpoint:
3274 */
3275 iface_index = 0;
3276 if (usbd_transfer_setup(udev, &iface_index,
3277 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3278 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3279 &udev->device_mtx)) {
3280 DPRINTFN(0, "could not setup default "
3281 "USB transfer\n");
3282 } else {
3283 goto repeat;
3284 }
3285 }
3286
3287 /*------------------------------------------------------------------------*
3288 * usbd_clear_stall_locked - factored out code
3289 *
3290 * NOTE: the intention of this function is not to reset the hardware
3291 * data toggle.
3292 *------------------------------------------------------------------------*/
3293 void
3294 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3295 {
3296 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3297
3298 /* check that we have a valid case */
3299 if (udev->flags.usb_mode == USB_MODE_HOST &&
3300 udev->parent_hub != NULL &&
3301 udev->bus->methods->clear_stall != NULL &&
3302 ep->methods != NULL) {
3303 (udev->bus->methods->clear_stall) (udev, ep);
3304 }
3305 }
3306
3307 /*------------------------------------------------------------------------*
3308 * usbd_clear_data_toggle - factored out code
3309 *
3310 * NOTE: the intention of this function is not to reset the hardware
3311 * data toggle on the USB device side.
3312 *------------------------------------------------------------------------*/
3313 void
3314 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3315 {
3316 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3317
3318 USB_BUS_LOCK(udev->bus);
3319 ep->toggle_next = 0;
3320 /* some hardware needs a callback to clear the data toggle */
3321 usbd_clear_stall_locked(udev, ep);
3322 USB_BUS_UNLOCK(udev->bus);
3323 }
3324
3325 /*------------------------------------------------------------------------*
3326 * usbd_clear_stall_callback - factored out clear stall callback
3327 *
3328 * Input parameters:
3329 * xfer1: Clear Stall Control Transfer
3330 * xfer2: Stalled USB Transfer
3331 *
3332 * This function is NULL safe.
3333 *
3334 * Return values:
3335 * 0: In progress
3336 * Else: Finished
3337 *
3338 * Clear stall config example:
3339 *
3340 * static const struct usb_config my_clearstall = {
3341 * .type = UE_CONTROL,
3342 * .endpoint = 0,
3343 * .direction = UE_DIR_ANY,
3344 * .interval = 50, //50 milliseconds
3345 * .bufsize = sizeof(struct usb_device_request),
3346 * .timeout = 1000, //1.000 seconds
3347 * .callback = &my_clear_stall_callback, // **
3348 * .usb_mode = USB_MODE_HOST,
3349 * };
3350 *
3351 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3352 * passing the correct parameters.
3353 *------------------------------------------------------------------------*/
3354 uint8_t
3355 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3356 struct usb_xfer *xfer2)
3357 {
3358 struct usb_device_request req;
3359
3360 if (xfer2 == NULL) {
3361 /* looks like we are tearing down */
3362 DPRINTF("NULL input parameter\n");
3363 return (0);
3364 }
3365 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3366 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3367
3368 switch (USB_GET_STATE(xfer1)) {
3369 case USB_ST_SETUP:
3370
3371 /*
3372 * pre-clear the data toggle to DATA0 ("umass.c" and
3373 * "ata-usb.c" depends on this)
3374 */
3375
3376 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3377
3378 /* setup a clear-stall packet */
3379
3380 req.bmRequestType = UT_WRITE_ENDPOINT;
3381 req.bRequest = UR_CLEAR_FEATURE;
3382 USETW(req.wValue, UF_ENDPOINT_HALT);
3383 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3384 req.wIndex[1] = 0;
3385 USETW(req.wLength, 0);
3386
3387 /*
3388 * "usbd_transfer_setup_sub()" will ensure that
3389 * we have sufficient room in the buffer for
3390 * the request structure!
3391 */
3392
3393 /* copy in the transfer */
3394
3395 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3396
3397 /* set length */
3398 xfer1->frlengths[0] = sizeof(req);
3399 xfer1->nframes = 1;
3400
3401 usbd_transfer_submit(xfer1);
3402 return (0);
3403
3404 case USB_ST_TRANSFERRED:
3405 break;
3406
3407 default: /* Error */
3408 if (xfer1->error == USB_ERR_CANCELLED) {
3409 return (0);
3410 }
3411 break;
3412 }
3413 return (1); /* Clear Stall Finished */
3414 }
3415
3416 /*------------------------------------------------------------------------*
3417 * usbd_transfer_poll
3418 *
3419 * The following function gets called from the USB keyboard driver and
3420 * UMASS when the system has panicked.
3421 *
3422 * NOTE: It is currently not possible to resume normal operation on
3423 * the USB controller which has been polled, due to clearing of the
3424 * "up_dsleep" and "up_msleep" flags.
3425 *------------------------------------------------------------------------*/
3426 void
3427 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3428 {
3429 struct usb_xfer *xfer;
3430 struct usb_xfer_root *xroot;
3431 struct usb_device *udev;
3432 struct usb_proc_msg *pm;
3433 struct usb_bus *bus;
3434 uint16_t n;
3435 uint16_t drop_bus_spin;
3436 uint16_t drop_bus;
3437 uint16_t drop_xfer;
3438
3439 for (n = 0; n != max; n++) {
3440 /* Extra checks to avoid panic */
3441 xfer = ppxfer[n];
3442 if (xfer == NULL)
3443 continue; /* no USB transfer */
3444 xroot = xfer->xroot;
3445 if (xroot == NULL)
3446 continue; /* no USB root */
3447 udev = xroot->udev;
3448 if (udev == NULL)
3449 continue; /* no USB device */
3450 bus = udev->bus;
3451 if (bus == NULL)
3452 continue; /* no BUS structure */
3453 if (bus->methods == NULL)
3454 continue; /* no BUS methods */
3455 if (bus->methods->xfer_poll == NULL)
3456 continue; /* no poll method */
3457
3458 drop_bus_spin = 0;
3459 drop_bus = 0;
3460 drop_xfer = 0;
3461
3462 if (USB_IN_POLLING_MODE_FUNC() == 0) {
3463 /* make sure that the BUS spin mutex is not locked */
3464 while (mtx_owned(&bus->bus_spin_lock)) {
3465 mtx_unlock_spin(&bus->bus_spin_lock);
3466 drop_bus_spin++;
3467 }
3468
3469 /* make sure that the BUS mutex is not locked */
3470 while (mtx_owned(&bus->bus_mtx)) {
3471 mtx_unlock(&bus->bus_mtx);
3472 drop_bus++;
3473 }
3474
3475 /* make sure that the transfer mutex is not locked */
3476 while (mtx_owned(xroot->xfer_mtx)) {
3477 mtx_unlock(xroot->xfer_mtx);
3478 drop_xfer++;
3479 }
3480 }
3481
3482 /* Make sure cv_signal() and cv_broadcast() are not called */
3483 USB_BUS_CONTROL_XFER_PROC(bus)->up_msleep = 0;
3484 USB_BUS_EXPLORE_PROC(bus)->up_msleep = 0;
3485 USB_BUS_GIANT_PROC(bus)->up_msleep = 0;
3486 USB_BUS_NON_GIANT_ISOC_PROC(bus)->up_msleep = 0;
3487 USB_BUS_NON_GIANT_BULK_PROC(bus)->up_msleep = 0;
3488
3489 /* poll USB hardware */
3490 (bus->methods->xfer_poll) (bus);
3491
3492 USB_BUS_LOCK(xroot->bus);
3493
3494 /* check for clear stall */
3495 if (udev->ctrl_xfer[1] != NULL) {
3496 /* poll clear stall start */
3497 pm = &udev->cs_msg[0].hdr;
3498 (pm->pm_callback) (pm);
3499 /* poll clear stall done thread */
3500 pm = &udev->ctrl_xfer[1]->
3501 xroot->done_m[0].hdr;
3502 (pm->pm_callback) (pm);
3503 }
3504
3505 /* poll done thread */
3506 pm = &xroot->done_m[0].hdr;
3507 (pm->pm_callback) (pm);
3508
3509 USB_BUS_UNLOCK(xroot->bus);
3510
3511 /* restore transfer mutex */
3512 while (drop_xfer--)
3513 mtx_lock(xroot->xfer_mtx);
3514
3515 /* restore BUS mutex */
3516 while (drop_bus--)
3517 mtx_lock(&bus->bus_mtx);
3518
3519 /* restore BUS spin mutex */
3520 while (drop_bus_spin--)
3521 mtx_lock_spin(&bus->bus_spin_lock);
3522 }
3523 }
3524
3525 static void
3526 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3527 uint8_t type, enum usb_dev_speed speed)
3528 {
3529 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3530 [USB_SPEED_LOW] = 8,
3531 [USB_SPEED_FULL] = 64,
3532 [USB_SPEED_HIGH] = 1024,
3533 [USB_SPEED_VARIABLE] = 1024,
3534 [USB_SPEED_SUPER] = 1024,
3535 };
3536
3537 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3538 [USB_SPEED_LOW] = 0, /* invalid */
3539 [USB_SPEED_FULL] = 1023,
3540 [USB_SPEED_HIGH] = 1024,
3541 [USB_SPEED_VARIABLE] = 3584,
3542 [USB_SPEED_SUPER] = 1024,
3543 };
3544
3545 static const uint16_t control_min[USB_SPEED_MAX] = {
3546 [USB_SPEED_LOW] = 8,
3547 [USB_SPEED_FULL] = 8,
3548 [USB_SPEED_HIGH] = 64,
3549 [USB_SPEED_VARIABLE] = 512,
3550 [USB_SPEED_SUPER] = 512,
3551 };
3552
3553 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3554 [USB_SPEED_LOW] = 8,
3555 [USB_SPEED_FULL] = 8,
3556 [USB_SPEED_HIGH] = 512,
3557 [USB_SPEED_VARIABLE] = 512,
3558 [USB_SPEED_SUPER] = 1024,
3559 };
3560
3561 uint16_t temp;
3562
3563 memset(ptr, 0, sizeof(*ptr));
3564
3565 switch (type) {
3566 case UE_INTERRUPT:
3567 ptr->range.max = intr_range_max[speed];
3568 break;
3569 case UE_ISOCHRONOUS:
3570 ptr->range.max = isoc_range_max[speed];
3571 break;
3572 default:
3573 if (type == UE_BULK)
3574 temp = bulk_min[speed];
3575 else /* UE_CONTROL */
3576 temp = control_min[speed];
3577
3578 /* default is fixed */
3579 ptr->fixed[0] = temp;
3580 ptr->fixed[1] = temp;
3581 ptr->fixed[2] = temp;
3582 ptr->fixed[3] = temp;
3583
3584 if (speed == USB_SPEED_FULL) {
3585 /* multiple sizes */
3586 ptr->fixed[1] = 16;
3587 ptr->fixed[2] = 32;
3588 ptr->fixed[3] = 64;
3589 }
3590 if ((speed == USB_SPEED_VARIABLE) &&
3591 (type == UE_BULK)) {
3592 /* multiple sizes */
3593 ptr->fixed[2] = 1024;
3594 ptr->fixed[3] = 1536;
3595 }
3596 break;
3597 }
3598 }
3599
3600 void *
3601 usbd_xfer_softc(struct usb_xfer *xfer)
3602 {
3603 return (xfer->priv_sc);
3604 }
3605
3606 void *
3607 usbd_xfer_get_priv(struct usb_xfer *xfer)
3608 {
3609 return (xfer->priv_fifo);
3610 }
3611
3612 void
3613 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3614 {
3615 xfer->priv_fifo = ptr;
3616 }
3617
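/*
 * Editorial example, not part of the original source: the accessors
 * above are typically used like this inside a transfer callback,
 * where "my_softc" and the mbuf bookkeeping are hypothetical:
 *
 *	struct my_softc *sc = usbd_xfer_softc(xfer);
 *	struct mbuf *m;
 *
 *	switch (USB_GET_STATE(xfer)) {
 *	case USB_ST_TRANSFERRED:
 *		m = usbd_xfer_get_priv(xfer);
 *		usbd_xfer_set_priv(xfer, NULL);
 *		// ... pass "m" along ...
 *		break;
 *	}
 */
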
3618 uint8_t
3619 usbd_xfer_state(struct usb_xfer *xfer)
3620 {
3621 return (xfer->usb_state);
3622 }
3623
3624 void
3625 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3626 {
3627 switch (flag) {
3628 case USB_FORCE_SHORT_XFER:
3629 xfer->flags.force_short_xfer = 1;
3630 break;
3631 case USB_SHORT_XFER_OK:
3632 xfer->flags.short_xfer_ok = 1;
3633 break;
3634 case USB_MULTI_SHORT_OK:
3635 xfer->flags.short_frames_ok = 1;
3636 break;
3637 case USB_MANUAL_STATUS:
3638 xfer->flags.manual_status = 1;
3639 break;
3640 }
3641 }
3642
3643 void
3644 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3645 {
3646 switch (flag) {
3647 case USB_FORCE_SHORT_XFER:
3648 xfer->flags.force_short_xfer = 0;
3649 break;
3650 case USB_SHORT_XFER_OK:
3651 xfer->flags.short_xfer_ok = 0;
3652 break;
3653 case USB_MULTI_SHORT_OK:
3654 xfer->flags.short_frames_ok = 0;
3655 break;
3656 case USB_MANUAL_STATUS:
3657 xfer->flags.manual_status = 0;
3658 break;
3659 }
3660 }
3661
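/*
 * Editorial example, not part of the original source: a sketch of
 * toggling one of the transfer flags at runtime, assuming a
 * hypothetical bulk-IN transfer index:
 *
 *	// accept a short read without treating it as an error
 *	usbd_xfer_set_flag(sc->sc_xfer[MY_BULK_RD], USB_SHORT_XFER_OK);
 */
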
3662 /*
3663 * The following function returns the time, in milliseconds, when the
3664 * isochronous transfer was completed by the hardware. The returned value wraps
3665 * around 65536 milliseconds.
3666 */
3667 uint16_t
3668 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3669 {
3670 return (xfer->isoc_time_complete);
3671 }
3672
3673 /*
3674 * The following function returns non-zero if the max packet size
3675 * field was clamped to a valid value. Else it returns zero.
3676 */
3677 uint8_t
3678 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3679 {
3680 return (xfer->flags_int.maxp_was_clamped);
3681 }
3682
3683 /*
3684 * The following function computes the next isochronous frame number
3685 * where the first isochronous packet should be queued.
3686 *
3687 * The function returns non-zero if there was a discontinuity.
3688 * Else zero is returned for normal operation.
3689 */
3690 uint8_t
3691 usbd_xfer_get_isochronous_start_frame(struct usb_xfer *xfer, uint32_t frame_curr,
3692 uint32_t frame_min, uint32_t frame_ms, uint32_t frame_mask, uint32_t *p_frame_start)
3693 {
3694 uint32_t duration;
3695 uint32_t delta;
3696 uint8_t retval;
3697 uint8_t shift;
3698
3699 /* Compute time ahead of current schedule. */
3700 delta = (xfer->endpoint->isoc_next - frame_curr) & frame_mask;
3701
3702 /*
3703 * Check if it is the first transfer or if the future frame
3704 * delta is less than one millisecond or if the frame delta is
3705 * negative:
3706 */
3707 if (xfer->endpoint->is_synced == 0 ||
3708 delta < (frame_ms + frame_min) ||
3709 delta > (frame_mask / 2)) {
3710 /* Schedule transfer 2 milliseconds into the future. */
3711 xfer->endpoint->isoc_next = (frame_curr + 2 * frame_ms + frame_min) & frame_mask;
3712 xfer->endpoint->is_synced = 1;
3713
3714 retval = 1;
3715 } else {
3716 retval = 0;
3717 }
3718
3719 /* Store start time, if any. */
3720 if (p_frame_start != NULL)
3721 *p_frame_start = xfer->endpoint->isoc_next & frame_mask;
3722
3723 /* Get relative completion time, in milliseconds. */
3724 delta = xfer->endpoint->isoc_next - frame_curr + (frame_curr % frame_ms);
3725 delta &= frame_mask;
3726 delta /= frame_ms;
3727
3728 switch (usbd_get_speed(xfer->xroot->udev)) {
3729 case USB_SPEED_FULL:
3730 shift = 3;
3731 break;
3732 default:
3733 shift = usbd_xfer_get_fps_shift(xfer);
3734 break;
3735 }
3736
3737 /* Get duration in milliseconds, rounded up. */
3738 duration = ((xfer->nframes << shift) + 7) / 8;
3739
3740 /* Compute full 32-bit completion time, in milliseconds. */
3741 xfer->isoc_time_complete =
3742 usb_isoc_time_expand(xfer->xroot->bus, frame_curr / frame_ms) +
3743 delta + duration;
3744
3745 /* Compute next isochronous frame. */
3746 xfer->endpoint->isoc_next += duration * frame_ms;
3747 xfer->endpoint->isoc_next &= frame_mask;
3748
3749 return (retval);
3750 }
3751