1 /* $FreeBSD$ */ 2 /*- 3 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 4 * 5 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #ifdef USB_GLOBAL_INCLUDE_FILE 30 #include USB_GLOBAL_INCLUDE_FILE 31 #else 32 #include <sys/stdint.h> 33 #include <sys/stddef.h> 34 #include <sys/param.h> 35 #include <sys/queue.h> 36 #include <sys/types.h> 37 #include <sys/systm.h> 38 #include <sys/kernel.h> 39 #include <sys/bus.h> 40 #include <sys/module.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/condvar.h> 44 #include <sys/sysctl.h> 45 #include <sys/sx.h> 46 #include <sys/unistd.h> 47 #include <sys/callout.h> 48 #include <sys/malloc.h> 49 #include <sys/priv.h> 50 51 #include <dev/usb/usb.h> 52 #include <dev/usb/usbdi.h> 53 #include <dev/usb/usbdi_util.h> 54 55 #define USB_DEBUG_VAR usb_debug 56 57 #include <dev/usb/usb_core.h> 58 #include <dev/usb/usb_busdma.h> 59 #include <dev/usb/usb_process.h> 60 #include <dev/usb/usb_transfer.h> 61 #include <dev/usb/usb_device.h> 62 #include <dev/usb/usb_debug.h> 63 #include <dev/usb/usb_util.h> 64 65 #include <dev/usb/usb_controller.h> 66 #include <dev/usb/usb_bus.h> 67 #include <dev/usb/usb_pf.h> 68 #endif /* USB_GLOBAL_INCLUDE_FILE */ 69 70 struct usb_std_packet_size { 71 struct { 72 uint16_t min; /* inclusive */ 73 uint16_t max; /* inclusive */ 74 } range; 75 76 uint16_t fixed[4]; 77 }; 78 79 static usb_callback_t usb_request_callback; 80 81 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = { 82 83 /* This transfer is used for generic control endpoint transfers */ 84 85 [0] = { 86 .type = UE_CONTROL, 87 .endpoint = 0x00, /* Control endpoint */ 88 .direction = UE_DIR_ANY, 89 .bufsize = USB_EP0_BUFSIZE, /* bytes */ 90 .flags = {.proxy_buffer = 1,}, 91 .callback = &usb_request_callback, 92 .usb_mode = USB_MODE_DUAL, /* both modes */ 93 }, 94 95 /* This transfer is used for generic clear stall only */ 96 97 [1] = { 98 .type = UE_CONTROL, 99 .endpoint = 0x00, /* Control pipe */ 100 .direction = UE_DIR_ANY, 101 .bufsize = sizeof(struct usb_device_request), 102 .callback = &usb_do_clear_stall_callback, 103 .timeout = 1000, /* 1 second */ 104 .interval = 50, /* 50ms */ 105 .usb_mode = USB_MODE_HOST, 106 }, 107 }; 
108 109 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = { 110 111 /* This transfer is used for generic control endpoint transfers */ 112 113 [0] = { 114 .type = UE_CONTROL, 115 .endpoint = 0x00, /* Control endpoint */ 116 .direction = UE_DIR_ANY, 117 .bufsize = 65535, /* bytes */ 118 .callback = &usb_request_callback, 119 .usb_mode = USB_MODE_DUAL, /* both modes */ 120 }, 121 122 /* This transfer is used for generic clear stall only */ 123 124 [1] = { 125 .type = UE_CONTROL, 126 .endpoint = 0x00, /* Control pipe */ 127 .direction = UE_DIR_ANY, 128 .bufsize = sizeof(struct usb_device_request), 129 .callback = &usb_do_clear_stall_callback, 130 .timeout = 1000, /* 1 second */ 131 .interval = 50, /* 50ms */ 132 .usb_mode = USB_MODE_HOST, 133 }, 134 }; 135 136 /* function prototypes */ 137 138 static void usbd_update_max_frame_size(struct usb_xfer *); 139 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t); 140 static void usbd_control_transfer_init(struct usb_xfer *); 141 static int usbd_setup_ctrl_transfer(struct usb_xfer *); 142 static void usb_callback_proc(struct usb_proc_msg *); 143 static void usbd_callback_ss_done_defer(struct usb_xfer *); 144 static void usbd_callback_wrapper(struct usb_xfer_queue *); 145 static void usbd_transfer_start_cb(void *); 146 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *); 147 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr, 148 uint8_t type, enum usb_dev_speed speed); 149 150 /*------------------------------------------------------------------------* 151 * usb_request_callback 152 *------------------------------------------------------------------------*/ 153 static void 154 usb_request_callback(struct usb_xfer *xfer, usb_error_t error) 155 { 156 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) 157 usb_handle_request_callback(xfer, error); 158 else 159 usbd_do_request_callback(xfer, error); 160 } 161 162 /*------------------------------------------------------------------------* 163 * usbd_update_max_frame_size 164 * 165 * This function updates the maximum frame size, hence high speed USB 166 * can transfer multiple consecutive packets. 167 *------------------------------------------------------------------------*/ 168 static void 169 usbd_update_max_frame_size(struct usb_xfer *xfer) 170 { 171 /* compute maximum frame size */ 172 /* this computation should not overflow 16-bit */ 173 /* max = 15 * 1024 */ 174 175 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count; 176 } 177 178 /*------------------------------------------------------------------------* 179 * usbd_get_dma_delay 180 * 181 * The following function is called when we need to 182 * synchronize with DMA hardware. 183 * 184 * Returns: 185 * 0: no DMA delay required 186 * Else: milliseconds of DMA delay 187 *------------------------------------------------------------------------*/ 188 usb_timeout_t 189 usbd_get_dma_delay(struct usb_device *udev) 190 { 191 const struct usb_bus_methods *mtod; 192 uint32_t temp; 193 194 mtod = udev->bus->methods; 195 temp = 0; 196 197 if (mtod->get_dma_delay) { 198 (mtod->get_dma_delay) (udev, &temp); 199 /* 200 * Round up and convert to milliseconds. Note that we use 201 * 1024 milliseconds per second. to save a division. 
202 */ 203 temp += 0x3FF; 204 temp /= 0x400; 205 } 206 return (temp); 207 } 208 209 /*------------------------------------------------------------------------* 210 * usbd_transfer_setup_sub_malloc 211 * 212 * This function will allocate one or more DMA'able memory chunks 213 * according to "size", "align" and "count" arguments. "ppc" is 214 * pointed to a linear array of USB page caches afterwards. 215 * 216 * If the "align" argument is equal to "1" a non-contiguous allocation 217 * can happen. Else if the "align" argument is greater than "1", the 218 * allocation will always be contiguous in memory. 219 * 220 * Returns: 221 * 0: Success 222 * Else: Failure 223 *------------------------------------------------------------------------*/ 224 #if USB_HAVE_BUSDMA 225 uint8_t 226 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm, 227 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align, 228 usb_size_t count) 229 { 230 struct usb_page_cache *pc; 231 struct usb_page *pg; 232 void *buf; 233 usb_size_t n_dma_pc; 234 usb_size_t n_dma_pg; 235 usb_size_t n_obj; 236 usb_size_t x; 237 usb_size_t y; 238 usb_size_t r; 239 usb_size_t z; 240 241 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n", 242 align)); 243 USB_ASSERT(size > 0, ("Invalid size = 0\n")); 244 245 if (count == 0) { 246 return (0); /* nothing to allocate */ 247 } 248 /* 249 * Make sure that the size is aligned properly. 250 */ 251 size = -((-size) & (-align)); 252 253 /* 254 * Try multi-allocation chunks to reduce the number of DMA 255 * allocations, hence DMA allocations are slow. 256 */ 257 if (align == 1) { 258 /* special case - non-cached multi page DMA memory */ 259 n_dma_pc = count; 260 n_dma_pg = (2 + (size / USB_PAGE_SIZE)); 261 n_obj = 1; 262 } else if (size >= USB_PAGE_SIZE) { 263 n_dma_pc = count; 264 n_dma_pg = 1; 265 n_obj = 1; 266 } else { 267 /* compute number of objects per page */ 268 #ifdef USB_DMA_SINGLE_ALLOC 269 n_obj = 1; 270 #else 271 n_obj = (USB_PAGE_SIZE / size); 272 #endif 273 /* 274 * Compute number of DMA chunks, rounded up 275 * to nearest one: 276 */ 277 n_dma_pc = howmany(count, n_obj); 278 n_dma_pg = 1; 279 } 280 281 /* 282 * DMA memory is allocated once, but mapped twice. That's why 283 * there is one list for auto-free and another list for 284 * non-auto-free which only holds the mapping and not the 285 * allocation. 
286 */ 287 if (parm->buf == NULL) { 288 /* reserve memory (auto-free) */ 289 parm->dma_page_ptr += n_dma_pc * n_dma_pg; 290 parm->dma_page_cache_ptr += n_dma_pc; 291 292 /* reserve memory (no-auto-free) */ 293 parm->dma_page_ptr += count * n_dma_pg; 294 parm->xfer_page_cache_ptr += count; 295 return (0); 296 } 297 for (x = 0; x != n_dma_pc; x++) { 298 /* need to initialize the page cache */ 299 parm->dma_page_cache_ptr[x].tag_parent = 300 &parm->curr_xfer->xroot->dma_parent_tag; 301 } 302 for (x = 0; x != count; x++) { 303 /* need to initialize the page cache */ 304 parm->xfer_page_cache_ptr[x].tag_parent = 305 &parm->curr_xfer->xroot->dma_parent_tag; 306 } 307 308 if (ppc != NULL) { 309 if (n_obj != 1) 310 *ppc = parm->xfer_page_cache_ptr; 311 else 312 *ppc = parm->dma_page_cache_ptr; 313 } 314 r = count; /* set remainder count */ 315 z = n_obj * size; /* set allocation size */ 316 pc = parm->xfer_page_cache_ptr; 317 pg = parm->dma_page_ptr; 318 319 if (n_obj == 1) { 320 /* 321 * Avoid mapping memory twice if only a single object 322 * should be allocated per page cache: 323 */ 324 for (x = 0; x != n_dma_pc; x++) { 325 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, 326 pg, z, align)) { 327 return (1); /* failure */ 328 } 329 /* Make room for one DMA page cache and "n_dma_pg" pages */ 330 parm->dma_page_cache_ptr++; 331 pg += n_dma_pg; 332 } 333 } else { 334 for (x = 0; x != n_dma_pc; x++) { 335 336 if (r < n_obj) { 337 /* compute last remainder */ 338 z = r * size; 339 n_obj = r; 340 } 341 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, 342 pg, z, align)) { 343 return (1); /* failure */ 344 } 345 /* Set beginning of current buffer */ 346 buf = parm->dma_page_cache_ptr->buffer; 347 /* Make room for one DMA page cache and "n_dma_pg" pages */ 348 parm->dma_page_cache_ptr++; 349 pg += n_dma_pg; 350 351 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) { 352 353 /* Load sub-chunk into DMA */ 354 if (usb_pc_dmamap_create(pc, size)) { 355 return (1); /* failure */ 356 } 357 pc->buffer = USB_ADD_BYTES(buf, y * size); 358 pc->page_start = pg; 359 360 USB_MTX_LOCK(pc->tag_parent->mtx); 361 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) { 362 USB_MTX_UNLOCK(pc->tag_parent->mtx); 363 return (1); /* failure */ 364 } 365 USB_MTX_UNLOCK(pc->tag_parent->mtx); 366 } 367 } 368 } 369 370 parm->xfer_page_cache_ptr = pc; 371 parm->dma_page_ptr = pg; 372 return (0); 373 } 374 #endif 375 376 /*------------------------------------------------------------------------* 377 * usbd_get_max_frame_length 378 * 379 * This function returns the maximum single frame length as computed by 380 * usbd_transfer_setup(). It is useful when computing buffer sizes for 381 * devices having multiple alternate settings. The SuperSpeed endpoint 382 * companion pointer is allowed to be NULL. 
383 *------------------------------------------------------------------------*/ 384 uint32_t 385 usbd_get_max_frame_length(const struct usb_endpoint_descriptor *edesc, 386 const struct usb_endpoint_ss_comp_descriptor *ecomp, 387 enum usb_dev_speed speed) 388 { 389 uint32_t max_packet_size; 390 uint32_t max_packet_count; 391 uint8_t type; 392 393 max_packet_size = UGETW(edesc->wMaxPacketSize); 394 max_packet_count = 1; 395 type = (edesc->bmAttributes & UE_XFERTYPE); 396 397 switch (speed) { 398 case USB_SPEED_HIGH: 399 switch (type) { 400 case UE_ISOCHRONOUS: 401 case UE_INTERRUPT: 402 max_packet_count += 403 (max_packet_size >> 11) & 3; 404 405 /* check for invalid max packet count */ 406 if (max_packet_count > 3) 407 max_packet_count = 3; 408 break; 409 default: 410 break; 411 } 412 max_packet_size &= 0x7FF; 413 break; 414 case USB_SPEED_SUPER: 415 max_packet_count += (max_packet_size >> 11) & 3; 416 417 if (ecomp != NULL) 418 max_packet_count += ecomp->bMaxBurst; 419 420 if ((max_packet_count == 0) || 421 (max_packet_count > 16)) 422 max_packet_count = 16; 423 424 switch (type) { 425 case UE_CONTROL: 426 max_packet_count = 1; 427 break; 428 case UE_ISOCHRONOUS: 429 if (ecomp != NULL) { 430 uint8_t mult; 431 432 mult = UE_GET_SS_ISO_MULT( 433 ecomp->bmAttributes) + 1; 434 if (mult > 3) 435 mult = 3; 436 437 max_packet_count *= mult; 438 } 439 break; 440 default: 441 break; 442 } 443 max_packet_size &= 0x7FF; 444 break; 445 default: 446 break; 447 } 448 return (max_packet_size * max_packet_count); 449 } 450 451 /*------------------------------------------------------------------------* 452 * usbd_transfer_setup_sub - transfer setup subroutine 453 * 454 * This function must be called from the "xfer_setup" callback of the 455 * USB Host or Device controller driver when setting up an USB 456 * transfer. This function will setup correct packet sizes, buffer 457 * sizes, flags and more, that are stored in the "usb_xfer" 458 * structure. 459 *------------------------------------------------------------------------*/ 460 void 461 usbd_transfer_setup_sub(struct usb_setup_params *parm) 462 { 463 enum { 464 REQ_SIZE = 8, 465 MIN_PKT = 8, 466 }; 467 struct usb_xfer *xfer = parm->curr_xfer; 468 const struct usb_config *setup = parm->curr_setup; 469 struct usb_endpoint_ss_comp_descriptor *ecomp; 470 struct usb_endpoint_descriptor *edesc; 471 struct usb_std_packet_size std_size; 472 usb_frcount_t n_frlengths; 473 usb_frcount_t n_frbuffers; 474 usb_frcount_t x; 475 uint16_t maxp_old; 476 uint8_t type; 477 uint8_t zmps; 478 479 /* 480 * Sanity check. The following parameters must be initialized before 481 * calling this function. 
482 */ 483 if ((parm->hc_max_packet_size == 0) || 484 (parm->hc_max_packet_count == 0) || 485 (parm->hc_max_frame_size == 0)) { 486 parm->err = USB_ERR_INVAL; 487 goto done; 488 } 489 edesc = xfer->endpoint->edesc; 490 ecomp = xfer->endpoint->ecomp; 491 492 type = (edesc->bmAttributes & UE_XFERTYPE); 493 494 xfer->flags = setup->flags; 495 xfer->nframes = setup->frames; 496 xfer->timeout = setup->timeout; 497 xfer->callback = setup->callback; 498 xfer->interval = setup->interval; 499 xfer->endpointno = edesc->bEndpointAddress; 500 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize); 501 xfer->max_packet_count = 1; 502 /* make a shadow copy: */ 503 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode; 504 505 parm->bufsize = setup->bufsize; 506 507 switch (parm->speed) { 508 case USB_SPEED_HIGH: 509 switch (type) { 510 case UE_ISOCHRONOUS: 511 case UE_INTERRUPT: 512 xfer->max_packet_count += 513 (xfer->max_packet_size >> 11) & 3; 514 515 /* check for invalid max packet count */ 516 if (xfer->max_packet_count > 3) 517 xfer->max_packet_count = 3; 518 break; 519 default: 520 break; 521 } 522 xfer->max_packet_size &= 0x7FF; 523 break; 524 case USB_SPEED_SUPER: 525 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; 526 527 if (ecomp != NULL) 528 xfer->max_packet_count += ecomp->bMaxBurst; 529 530 if ((xfer->max_packet_count == 0) || 531 (xfer->max_packet_count > 16)) 532 xfer->max_packet_count = 16; 533 534 switch (type) { 535 case UE_CONTROL: 536 xfer->max_packet_count = 1; 537 break; 538 case UE_ISOCHRONOUS: 539 if (ecomp != NULL) { 540 uint8_t mult; 541 542 mult = UE_GET_SS_ISO_MULT( 543 ecomp->bmAttributes) + 1; 544 if (mult > 3) 545 mult = 3; 546 547 xfer->max_packet_count *= mult; 548 } 549 break; 550 default: 551 break; 552 } 553 xfer->max_packet_size &= 0x7FF; 554 break; 555 default: 556 break; 557 } 558 /* range check "max_packet_count" */ 559 560 if (xfer->max_packet_count > parm->hc_max_packet_count) { 561 xfer->max_packet_count = parm->hc_max_packet_count; 562 } 563 564 /* store max packet size value before filtering */ 565 566 maxp_old = xfer->max_packet_size; 567 568 /* filter "wMaxPacketSize" according to HC capabilities */ 569 570 if ((xfer->max_packet_size > parm->hc_max_packet_size) || 571 (xfer->max_packet_size == 0)) { 572 xfer->max_packet_size = parm->hc_max_packet_size; 573 } 574 /* filter "wMaxPacketSize" according to standard sizes */ 575 576 usbd_get_std_packet_size(&std_size, type, parm->speed); 577 578 if (std_size.range.min || std_size.range.max) { 579 580 if (xfer->max_packet_size < std_size.range.min) { 581 xfer->max_packet_size = std_size.range.min; 582 } 583 if (xfer->max_packet_size > std_size.range.max) { 584 xfer->max_packet_size = std_size.range.max; 585 } 586 } else { 587 588 if (xfer->max_packet_size >= std_size.fixed[3]) { 589 xfer->max_packet_size = std_size.fixed[3]; 590 } else if (xfer->max_packet_size >= std_size.fixed[2]) { 591 xfer->max_packet_size = std_size.fixed[2]; 592 } else if (xfer->max_packet_size >= std_size.fixed[1]) { 593 xfer->max_packet_size = std_size.fixed[1]; 594 } else { 595 /* only one possibility left */ 596 xfer->max_packet_size = std_size.fixed[0]; 597 } 598 } 599 600 /* 601 * Check if the max packet size was outside its allowed range 602 * and clamped to a valid value: 603 */ 604 if (maxp_old != xfer->max_packet_size) 605 xfer->flags_int.maxp_was_clamped = 1; 606 607 /* compute "max_frame_size" */ 608 609 usbd_update_max_frame_size(xfer); 610 611 /* check interrupt interval and transfer pre-delay */ 612 613 if (type 
== UE_ISOCHRONOUS) { 614 615 uint16_t frame_limit; 616 617 xfer->interval = 0; /* not used, must be zero */ 618 xfer->flags_int.isochronous_xfr = 1; /* set flag */ 619 620 if (xfer->timeout == 0) { 621 /* 622 * set a default timeout in 623 * case something goes wrong! 624 */ 625 xfer->timeout = 1000 / 4; 626 } 627 switch (parm->speed) { 628 case USB_SPEED_LOW: 629 case USB_SPEED_FULL: 630 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER; 631 xfer->fps_shift = 0; 632 break; 633 default: 634 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER; 635 xfer->fps_shift = edesc->bInterval; 636 if (xfer->fps_shift > 0) 637 xfer->fps_shift--; 638 if (xfer->fps_shift > 3) 639 xfer->fps_shift = 3; 640 if (xfer->flags.pre_scale_frames != 0) 641 xfer->nframes <<= (3 - xfer->fps_shift); 642 break; 643 } 644 645 if (xfer->nframes > frame_limit) { 646 /* 647 * this is not going to work 648 * cross hardware 649 */ 650 parm->err = USB_ERR_INVAL; 651 goto done; 652 } 653 if (xfer->nframes == 0) { 654 /* 655 * this is not a valid value 656 */ 657 parm->err = USB_ERR_ZERO_NFRAMES; 658 goto done; 659 } 660 } else { 661 662 /* 663 * If a value is specified use that else check the 664 * endpoint descriptor! 665 */ 666 if (type == UE_INTERRUPT) { 667 668 uint32_t temp; 669 670 if (xfer->interval == 0) { 671 672 xfer->interval = edesc->bInterval; 673 674 switch (parm->speed) { 675 case USB_SPEED_LOW: 676 case USB_SPEED_FULL: 677 break; 678 default: 679 /* 125us -> 1ms */ 680 if (xfer->interval < 4) 681 xfer->interval = 1; 682 else if (xfer->interval > 16) 683 xfer->interval = (1 << (16 - 4)); 684 else 685 xfer->interval = 686 (1 << (xfer->interval - 4)); 687 break; 688 } 689 } 690 691 if (xfer->interval == 0) { 692 /* 693 * One millisecond is the smallest 694 * interval we support: 695 */ 696 xfer->interval = 1; 697 } 698 699 xfer->fps_shift = 0; 700 temp = 1; 701 702 while ((temp != 0) && (temp < xfer->interval)) { 703 xfer->fps_shift++; 704 temp *= 2; 705 } 706 707 switch (parm->speed) { 708 case USB_SPEED_LOW: 709 case USB_SPEED_FULL: 710 break; 711 default: 712 xfer->fps_shift += 3; 713 break; 714 } 715 } 716 } 717 718 /* 719 * NOTE: we do not allow "max_packet_size" or "max_frame_size" 720 * to be equal to zero when setting up USB transfers, hence 721 * this leads to a lot of extra code in the USB kernel. 
722 */ 723 724 if ((xfer->max_frame_size == 0) || 725 (xfer->max_packet_size == 0)) { 726 727 zmps = 1; 728 729 if ((parm->bufsize <= MIN_PKT) && 730 (type != UE_CONTROL) && 731 (type != UE_BULK)) { 732 733 /* workaround */ 734 xfer->max_packet_size = MIN_PKT; 735 xfer->max_packet_count = 1; 736 parm->bufsize = 0; /* automatic setup length */ 737 usbd_update_max_frame_size(xfer); 738 739 } else { 740 parm->err = USB_ERR_ZERO_MAXP; 741 goto done; 742 } 743 744 } else { 745 zmps = 0; 746 } 747 748 /* 749 * check if we should setup a default 750 * length: 751 */ 752 753 if (parm->bufsize == 0) { 754 755 parm->bufsize = xfer->max_frame_size; 756 757 if (type == UE_ISOCHRONOUS) { 758 parm->bufsize *= xfer->nframes; 759 } 760 } 761 /* 762 * check if we are about to setup a proxy 763 * type of buffer: 764 */ 765 766 if (xfer->flags.proxy_buffer) { 767 768 /* round bufsize up */ 769 770 parm->bufsize += (xfer->max_frame_size - 1); 771 772 if (parm->bufsize < xfer->max_frame_size) { 773 /* length wrapped around */ 774 parm->err = USB_ERR_INVAL; 775 goto done; 776 } 777 /* subtract remainder */ 778 779 parm->bufsize -= (parm->bufsize % xfer->max_frame_size); 780 781 /* add length of USB device request structure, if any */ 782 783 if (type == UE_CONTROL) { 784 parm->bufsize += REQ_SIZE; /* SETUP message */ 785 } 786 } 787 xfer->max_data_length = parm->bufsize; 788 789 /* Setup "n_frlengths" and "n_frbuffers" */ 790 791 if (type == UE_ISOCHRONOUS) { 792 n_frlengths = xfer->nframes; 793 n_frbuffers = 1; 794 } else { 795 796 if (type == UE_CONTROL) { 797 xfer->flags_int.control_xfr = 1; 798 if (xfer->nframes == 0) { 799 if (parm->bufsize <= REQ_SIZE) { 800 /* 801 * there will never be any data 802 * stage 803 */ 804 xfer->nframes = 1; 805 } else { 806 xfer->nframes = 2; 807 } 808 } 809 } else { 810 if (xfer->nframes == 0) { 811 xfer->nframes = 1; 812 } 813 } 814 815 n_frlengths = xfer->nframes; 816 n_frbuffers = xfer->nframes; 817 } 818 819 /* 820 * check if we have room for the 821 * USB device request structure: 822 */ 823 824 if (type == UE_CONTROL) { 825 826 if (xfer->max_data_length < REQ_SIZE) { 827 /* length wrapped around or too small bufsize */ 828 parm->err = USB_ERR_INVAL; 829 goto done; 830 } 831 xfer->max_data_length -= REQ_SIZE; 832 } 833 /* 834 * Setup "frlengths" and shadow "frlengths" for keeping the 835 * initial frame lengths when a USB transfer is complete. This 836 * information is useful when computing isochronous offsets. 
837 */ 838 xfer->frlengths = parm->xfer_length_ptr; 839 parm->xfer_length_ptr += 2 * n_frlengths; 840 841 /* setup "frbuffers" */ 842 xfer->frbuffers = parm->xfer_page_cache_ptr; 843 parm->xfer_page_cache_ptr += n_frbuffers; 844 845 /* initialize max frame count */ 846 xfer->max_frame_count = xfer->nframes; 847 848 /* 849 * check if we need to setup 850 * a local buffer: 851 */ 852 853 if (!xfer->flags.ext_buffer) { 854 #if USB_HAVE_BUSDMA 855 struct usb_page_search page_info; 856 struct usb_page_cache *pc; 857 858 if (usbd_transfer_setup_sub_malloc(parm, 859 &pc, parm->bufsize, 1, 1)) { 860 parm->err = USB_ERR_NOMEM; 861 } else if (parm->buf != NULL) { 862 863 usbd_get_page(pc, 0, &page_info); 864 865 xfer->local_buffer = page_info.buffer; 866 867 usbd_xfer_set_frame_offset(xfer, 0, 0); 868 869 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 870 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 871 } 872 } 873 #else 874 /* align data */ 875 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 876 877 if (parm->buf != NULL) { 878 xfer->local_buffer = 879 USB_ADD_BYTES(parm->buf, parm->size[0]); 880 881 usbd_xfer_set_frame_offset(xfer, 0, 0); 882 883 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 884 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 885 } 886 } 887 parm->size[0] += parm->bufsize; 888 889 /* align data again */ 890 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 891 #endif 892 } 893 /* 894 * Compute maximum buffer size 895 */ 896 897 if (parm->bufsize_max < parm->bufsize) { 898 parm->bufsize_max = parm->bufsize; 899 } 900 #if USB_HAVE_BUSDMA 901 if (xfer->flags_int.bdma_enable) { 902 /* 903 * Setup "dma_page_ptr". 904 * 905 * Proof for formula below: 906 * 907 * Assume there are three USB frames having length "a", "b" and 908 * "c". These USB frames will at maximum need "z" 909 * "usb_page" structures. 
"z" is given by: 910 * 911 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) + 912 * ((c / USB_PAGE_SIZE) + 2); 913 * 914 * Constraining "a", "b" and "c" like this: 915 * 916 * (a + b + c) <= parm->bufsize 917 * 918 * We know that: 919 * 920 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2)); 921 * 922 * Here is the general formula: 923 */ 924 xfer->dma_page_ptr = parm->dma_page_ptr; 925 parm->dma_page_ptr += (2 * n_frbuffers); 926 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE); 927 } 928 #endif 929 if (zmps) { 930 /* correct maximum data length */ 931 xfer->max_data_length = 0; 932 } 933 /* subtract USB frame remainder from "hc_max_frame_size" */ 934 935 xfer->max_hc_frame_size = 936 (parm->hc_max_frame_size - 937 (parm->hc_max_frame_size % xfer->max_frame_size)); 938 939 if (xfer->max_hc_frame_size == 0) { 940 parm->err = USB_ERR_INVAL; 941 goto done; 942 } 943 944 /* initialize frame buffers */ 945 946 if (parm->buf) { 947 for (x = 0; x != n_frbuffers; x++) { 948 xfer->frbuffers[x].tag_parent = 949 &xfer->xroot->dma_parent_tag; 950 #if USB_HAVE_BUSDMA 951 if (xfer->flags_int.bdma_enable && 952 (parm->bufsize_max > 0)) { 953 954 if (usb_pc_dmamap_create( 955 xfer->frbuffers + x, 956 parm->bufsize_max)) { 957 parm->err = USB_ERR_NOMEM; 958 goto done; 959 } 960 } 961 #endif 962 } 963 } 964 done: 965 if (parm->err) { 966 /* 967 * Set some dummy values so that we avoid division by zero: 968 */ 969 xfer->max_hc_frame_size = 1; 970 xfer->max_frame_size = 1; 971 xfer->max_packet_size = 1; 972 xfer->max_data_length = 0; 973 xfer->nframes = 0; 974 xfer->max_frame_count = 0; 975 } 976 } 977 978 static uint8_t 979 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start, 980 uint16_t n_setup) 981 { 982 while (n_setup--) { 983 uint8_t type = setup_start[n_setup].type; 984 if (type == UE_BULK || type == UE_BULK_INTR || 985 type == UE_TYPE_ANY) 986 return (1); 987 } 988 return (0); 989 } 990 991 /*------------------------------------------------------------------------* 992 * usbd_transfer_setup - setup an array of USB transfers 993 * 994 * NOTE: You must always call "usbd_transfer_unsetup" after calling 995 * "usbd_transfer_setup" if success was returned. 996 * 997 * The idea is that the USB device driver should pre-allocate all its 998 * transfers by one call to this function. 
999 * 1000 * Return values: 1001 * 0: Success 1002 * Else: Failure 1003 *------------------------------------------------------------------------*/ 1004 usb_error_t 1005 usbd_transfer_setup(struct usb_device *udev, 1006 const uint8_t *ifaces, struct usb_xfer **ppxfer, 1007 const struct usb_config *setup_start, uint16_t n_setup, 1008 void *priv_sc, struct mtx *xfer_mtx) 1009 { 1010 const struct usb_config *setup_end = setup_start + n_setup; 1011 const struct usb_config *setup; 1012 struct usb_setup_params *parm; 1013 struct usb_endpoint *ep; 1014 struct usb_xfer_root *info; 1015 struct usb_xfer *xfer; 1016 void *buf = NULL; 1017 usb_error_t error = 0; 1018 uint16_t n; 1019 uint16_t refcount; 1020 uint8_t do_unlock; 1021 1022 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1023 "usbd_transfer_setup can sleep!"); 1024 1025 /* do some checking first */ 1026 1027 if (n_setup == 0) { 1028 DPRINTFN(6, "setup array has zero length!\n"); 1029 return (USB_ERR_INVAL); 1030 } 1031 if (ifaces == NULL) { 1032 DPRINTFN(6, "ifaces array is NULL!\n"); 1033 return (USB_ERR_INVAL); 1034 } 1035 if (xfer_mtx == NULL) { 1036 DPRINTFN(6, "using global lock\n"); 1037 xfer_mtx = &Giant; 1038 } 1039 1040 /* more sanity checks */ 1041 1042 for (setup = setup_start, n = 0; 1043 setup != setup_end; setup++, n++) { 1044 if (setup->bufsize == (usb_frlength_t)-1) { 1045 error = USB_ERR_BAD_BUFSIZE; 1046 DPRINTF("invalid bufsize\n"); 1047 } 1048 if (setup->callback == NULL) { 1049 error = USB_ERR_NO_CALLBACK; 1050 DPRINTF("no callback\n"); 1051 } 1052 ppxfer[n] = NULL; 1053 } 1054 1055 if (error) 1056 return (error); 1057 1058 /* Protect scratch area */ 1059 do_unlock = usbd_ctrl_lock(udev); 1060 1061 refcount = 0; 1062 info = NULL; 1063 1064 parm = &udev->scratch.xfer_setup[0].parm; 1065 memset(parm, 0, sizeof(*parm)); 1066 1067 parm->udev = udev; 1068 parm->speed = usbd_get_speed(udev); 1069 parm->hc_max_packet_count = 1; 1070 1071 if (parm->speed >= USB_SPEED_MAX) { 1072 parm->err = USB_ERR_INVAL; 1073 goto done; 1074 } 1075 /* setup all transfers */ 1076 1077 while (1) { 1078 1079 if (buf) { 1080 /* 1081 * Initialize the "usb_xfer_root" structure, 1082 * which is common for all our USB transfers. 
1083 */ 1084 info = USB_ADD_BYTES(buf, 0); 1085 1086 info->memory_base = buf; 1087 info->memory_size = parm->size[0]; 1088 1089 #if USB_HAVE_BUSDMA 1090 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]); 1091 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]); 1092 #endif 1093 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]); 1094 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]); 1095 1096 cv_init(&info->cv_drain, "WDRAIN"); 1097 1098 info->xfer_mtx = xfer_mtx; 1099 #if USB_HAVE_BUSDMA 1100 usb_dma_tag_setup(&info->dma_parent_tag, 1101 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag, 1102 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits, 1103 parm->dma_tag_max); 1104 #endif 1105 1106 info->bus = udev->bus; 1107 info->udev = udev; 1108 1109 TAILQ_INIT(&info->done_q.head); 1110 info->done_q.command = &usbd_callback_wrapper; 1111 #if USB_HAVE_BUSDMA 1112 TAILQ_INIT(&info->dma_q.head); 1113 info->dma_q.command = &usb_bdma_work_loop; 1114 #endif 1115 info->done_m[0].hdr.pm_callback = &usb_callback_proc; 1116 info->done_m[0].xroot = info; 1117 info->done_m[1].hdr.pm_callback = &usb_callback_proc; 1118 info->done_m[1].xroot = info; 1119 1120 /* 1121 * In device side mode control endpoint 1122 * requests need to run from a separate 1123 * context, else there is a chance of 1124 * deadlock! 1125 */ 1126 if (setup_start == usb_control_ep_cfg || 1127 setup_start == usb_control_ep_quirk_cfg) 1128 info->done_p = 1129 USB_BUS_CONTROL_XFER_PROC(udev->bus); 1130 else if (xfer_mtx == &Giant) 1131 info->done_p = 1132 USB_BUS_GIANT_PROC(udev->bus); 1133 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup)) 1134 info->done_p = 1135 USB_BUS_NON_GIANT_BULK_PROC(udev->bus); 1136 else 1137 info->done_p = 1138 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus); 1139 } 1140 /* reset sizes */ 1141 1142 parm->size[0] = 0; 1143 parm->buf = buf; 1144 parm->size[0] += sizeof(info[0]); 1145 1146 for (setup = setup_start, n = 0; 1147 setup != setup_end; setup++, n++) { 1148 1149 /* skip USB transfers without callbacks: */ 1150 if (setup->callback == NULL) { 1151 continue; 1152 } 1153 /* see if there is a matching endpoint */ 1154 ep = usbd_get_endpoint(udev, 1155 ifaces[setup->if_index], setup); 1156 1157 /* 1158 * Check that the USB PIPE is valid and that 1159 * the endpoint mode is proper. 1160 * 1161 * Make sure we don't allocate a streams 1162 * transfer when such a combination is not 1163 * valid. 1164 */ 1165 if ((ep == NULL) || (ep->methods == NULL) || 1166 ((ep->ep_mode != USB_EP_MODE_STREAMS) && 1167 (ep->ep_mode != USB_EP_MODE_DEFAULT)) || 1168 (setup->stream_id != 0 && 1169 (setup->stream_id >= USB_MAX_EP_STREAMS || 1170 (ep->ep_mode != USB_EP_MODE_STREAMS)))) { 1171 if (setup->flags.no_pipe_ok) 1172 continue; 1173 if ((setup->usb_mode != USB_MODE_DUAL) && 1174 (setup->usb_mode != udev->flags.usb_mode)) 1175 continue; 1176 parm->err = USB_ERR_NO_PIPE; 1177 goto done; 1178 } 1179 1180 /* align data properly */ 1181 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1182 1183 /* store current setup pointer */ 1184 parm->curr_setup = setup; 1185 1186 if (buf) { 1187 /* 1188 * Common initialization of the 1189 * "usb_xfer" structure. 
1190 */ 1191 xfer = USB_ADD_BYTES(buf, parm->size[0]); 1192 xfer->address = udev->address; 1193 xfer->priv_sc = priv_sc; 1194 xfer->xroot = info; 1195 1196 usb_callout_init_mtx(&xfer->timeout_handle, 1197 &udev->bus->bus_mtx, 0); 1198 } else { 1199 /* 1200 * Setup a dummy xfer, hence we are 1201 * writing to the "usb_xfer" 1202 * structure pointed to by "xfer" 1203 * before we have allocated any 1204 * memory: 1205 */ 1206 xfer = &udev->scratch.xfer_setup[0].dummy; 1207 memset(xfer, 0, sizeof(*xfer)); 1208 refcount++; 1209 } 1210 1211 /* set transfer endpoint pointer */ 1212 xfer->endpoint = ep; 1213 1214 /* set transfer stream ID */ 1215 xfer->stream_id = setup->stream_id; 1216 1217 parm->size[0] += sizeof(xfer[0]); 1218 parm->methods = xfer->endpoint->methods; 1219 parm->curr_xfer = xfer; 1220 1221 /* 1222 * Call the Host or Device controller transfer 1223 * setup routine: 1224 */ 1225 (udev->bus->methods->xfer_setup) (parm); 1226 1227 /* check for error */ 1228 if (parm->err) 1229 goto done; 1230 1231 if (buf) { 1232 /* 1233 * Increment the endpoint refcount. This 1234 * basically prevents setting a new 1235 * configuration and alternate setting 1236 * when USB transfers are in use on 1237 * the given interface. Search the USB 1238 * code for "endpoint->refcount_alloc" if you 1239 * want more information. 1240 */ 1241 USB_BUS_LOCK(info->bus); 1242 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX) 1243 parm->err = USB_ERR_INVAL; 1244 1245 xfer->endpoint->refcount_alloc++; 1246 1247 if (xfer->endpoint->refcount_alloc == 0) 1248 panic("usbd_transfer_setup(): Refcount wrapped to zero\n"); 1249 USB_BUS_UNLOCK(info->bus); 1250 1251 /* 1252 * Whenever we set ppxfer[] then we 1253 * also need to increment the 1254 * "setup_refcount": 1255 */ 1256 info->setup_refcount++; 1257 1258 /* 1259 * Transfer is successfully setup and 1260 * can be used: 1261 */ 1262 ppxfer[n] = xfer; 1263 } 1264 1265 /* check for error */ 1266 if (parm->err) 1267 goto done; 1268 } 1269 1270 if (buf != NULL || parm->err != 0) 1271 goto done; 1272 1273 /* if no transfers, nothing to do */ 1274 if (refcount == 0) 1275 goto done; 1276 1277 /* align data properly */ 1278 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1279 1280 /* store offset temporarily */ 1281 parm->size[1] = parm->size[0]; 1282 1283 /* 1284 * The number of DMA tags required depends on 1285 * the number of endpoints. The current estimate 1286 * for maximum number of DMA tags per endpoint 1287 * is three: 1288 * 1) for loading memory 1289 * 2) for allocating memory 1290 * 3) for fixing memory [UHCI] 1291 */ 1292 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX); 1293 1294 /* 1295 * DMA tags for QH, TD, Data and more. 
1296 */ 1297 parm->dma_tag_max += 8; 1298 1299 parm->dma_tag_p += parm->dma_tag_max; 1300 1301 parm->size[0] += ((uint8_t *)parm->dma_tag_p) - 1302 ((uint8_t *)0); 1303 1304 /* align data properly */ 1305 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1306 1307 /* store offset temporarily */ 1308 parm->size[3] = parm->size[0]; 1309 1310 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) - 1311 ((uint8_t *)0); 1312 1313 /* align data properly */ 1314 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1315 1316 /* store offset temporarily */ 1317 parm->size[4] = parm->size[0]; 1318 1319 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) - 1320 ((uint8_t *)0); 1321 1322 /* store end offset temporarily */ 1323 parm->size[5] = parm->size[0]; 1324 1325 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) - 1326 ((uint8_t *)0); 1327 1328 /* store end offset temporarily */ 1329 1330 parm->size[2] = parm->size[0]; 1331 1332 /* align data properly */ 1333 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1334 1335 parm->size[6] = parm->size[0]; 1336 1337 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) - 1338 ((uint8_t *)0); 1339 1340 /* align data properly */ 1341 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1342 1343 /* allocate zeroed memory */ 1344 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO); 1345 1346 if (buf == NULL) { 1347 parm->err = USB_ERR_NOMEM; 1348 DPRINTFN(0, "cannot allocate memory block for " 1349 "configuration (%d bytes)\n", 1350 parm->size[0]); 1351 goto done; 1352 } 1353 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]); 1354 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]); 1355 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]); 1356 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]); 1357 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]); 1358 } 1359 1360 done: 1361 if (buf) { 1362 if (info->setup_refcount == 0) { 1363 /* 1364 * "usbd_transfer_unsetup_sub" will unlock 1365 * the bus mutex before returning ! 
1366 */ 1367 USB_BUS_LOCK(info->bus); 1368 1369 /* something went wrong */ 1370 usbd_transfer_unsetup_sub(info, 0); 1371 } 1372 } 1373 1374 /* check if any errors happened */ 1375 if (parm->err) 1376 usbd_transfer_unsetup(ppxfer, n_setup); 1377 1378 error = parm->err; 1379 1380 if (do_unlock) 1381 usbd_ctrl_unlock(udev); 1382 1383 return (error); 1384 } 1385 1386 /*------------------------------------------------------------------------* 1387 * usbd_transfer_unsetup_sub - factored out code 1388 *------------------------------------------------------------------------*/ 1389 static void 1390 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay) 1391 { 1392 #if USB_HAVE_BUSDMA 1393 struct usb_page_cache *pc; 1394 #endif 1395 1396 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 1397 1398 /* wait for any outstanding DMA operations */ 1399 1400 if (needs_delay) { 1401 usb_timeout_t temp; 1402 temp = usbd_get_dma_delay(info->udev); 1403 if (temp != 0) { 1404 usb_pause_mtx(&info->bus->bus_mtx, 1405 USB_MS_TO_TICKS(temp)); 1406 } 1407 } 1408 1409 /* make sure that our done messages are not queued anywhere */ 1410 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]); 1411 1412 USB_BUS_UNLOCK(info->bus); 1413 1414 #if USB_HAVE_BUSDMA 1415 /* free DMA'able memory, if any */ 1416 pc = info->dma_page_cache_start; 1417 while (pc != info->dma_page_cache_end) { 1418 usb_pc_free_mem(pc); 1419 pc++; 1420 } 1421 1422 /* free DMA maps in all "xfer->frbuffers" */ 1423 pc = info->xfer_page_cache_start; 1424 while (pc != info->xfer_page_cache_end) { 1425 usb_pc_dmamap_destroy(pc); 1426 pc++; 1427 } 1428 1429 /* free all DMA tags */ 1430 usb_dma_tag_unsetup(&info->dma_parent_tag); 1431 #endif 1432 1433 cv_destroy(&info->cv_drain); 1434 1435 /* 1436 * free the "memory_base" last, hence the "info" structure is 1437 * contained within the "memory_base"! 1438 */ 1439 free(info->memory_base, M_USB); 1440 } 1441 1442 /*------------------------------------------------------------------------* 1443 * usbd_transfer_unsetup - unsetup/free an array of USB transfers 1444 * 1445 * NOTE: All USB transfers in progress will get called back passing 1446 * the error code "USB_ERR_CANCELLED" before this function 1447 * returns. 1448 *------------------------------------------------------------------------*/ 1449 void 1450 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup) 1451 { 1452 struct usb_xfer *xfer; 1453 struct usb_xfer_root *info; 1454 uint8_t needs_delay = 0; 1455 1456 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1457 "usbd_transfer_unsetup can sleep!"); 1458 1459 while (n_setup--) { 1460 xfer = pxfer[n_setup]; 1461 1462 if (xfer == NULL) 1463 continue; 1464 1465 info = xfer->xroot; 1466 1467 USB_XFER_LOCK(xfer); 1468 USB_BUS_LOCK(info->bus); 1469 1470 /* 1471 * HINT: when you start/stop a transfer, it might be a 1472 * good idea to directly use the "pxfer[]" structure: 1473 * 1474 * usbd_transfer_start(sc->pxfer[0]); 1475 * usbd_transfer_stop(sc->pxfer[0]); 1476 * 1477 * That way, if your code has many parts that will not 1478 * stop running under the same lock, in other words 1479 * "xfer_mtx", the usbd_transfer_start and 1480 * usbd_transfer_stop functions will simply return 1481 * when they detect a NULL pointer argument. 
1482 * 1483 * To avoid any races we clear the "pxfer[]" pointer 1484 * while holding the private mutex of the driver: 1485 */ 1486 pxfer[n_setup] = NULL; 1487 1488 USB_BUS_UNLOCK(info->bus); 1489 USB_XFER_UNLOCK(xfer); 1490 1491 usbd_transfer_drain(xfer); 1492 1493 #if USB_HAVE_BUSDMA 1494 if (xfer->flags_int.bdma_enable) 1495 needs_delay = 1; 1496 #endif 1497 /* 1498 * NOTE: default endpoint does not have an 1499 * interface, even if endpoint->iface_index == 0 1500 */ 1501 USB_BUS_LOCK(info->bus); 1502 xfer->endpoint->refcount_alloc--; 1503 USB_BUS_UNLOCK(info->bus); 1504 1505 usb_callout_drain(&xfer->timeout_handle); 1506 1507 USB_BUS_LOCK(info->bus); 1508 1509 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup " 1510 "reference count\n")); 1511 1512 info->setup_refcount--; 1513 1514 if (info->setup_refcount == 0) { 1515 usbd_transfer_unsetup_sub(info, 1516 needs_delay); 1517 } else { 1518 USB_BUS_UNLOCK(info->bus); 1519 } 1520 } 1521 } 1522 1523 /*------------------------------------------------------------------------* 1524 * usbd_control_transfer_init - factored out code 1525 * 1526 * In USB Device Mode we have to wait for the SETUP packet which 1527 * containst the "struct usb_device_request" structure, before we can 1528 * transfer any data. In USB Host Mode we already have the SETUP 1529 * packet at the moment the USB transfer is started. This leads us to 1530 * having to setup the USB transfer at two different places in 1531 * time. This function just contains factored out control transfer 1532 * initialisation code, so that we don't duplicate the code. 1533 *------------------------------------------------------------------------*/ 1534 static void 1535 usbd_control_transfer_init(struct usb_xfer *xfer) 1536 { 1537 struct usb_device_request req; 1538 1539 /* copy out the USB request header */ 1540 1541 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req)); 1542 1543 /* setup remainder */ 1544 1545 xfer->flags_int.control_rem = UGETW(req.wLength); 1546 1547 /* copy direction to endpoint variable */ 1548 1549 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT); 1550 xfer->endpointno |= 1551 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT; 1552 } 1553 1554 /*------------------------------------------------------------------------* 1555 * usbd_control_transfer_did_data 1556 * 1557 * This function returns non-zero if a control endpoint has 1558 * transferred the first DATA packet after the SETUP packet. 1559 * Else it returns zero. 1560 *------------------------------------------------------------------------*/ 1561 static uint8_t 1562 usbd_control_transfer_did_data(struct usb_xfer *xfer) 1563 { 1564 struct usb_device_request req; 1565 1566 /* SETUP packet is not yet sent */ 1567 if (xfer->flags_int.control_hdr != 0) 1568 return (0); 1569 1570 /* copy out the USB request header */ 1571 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req)); 1572 1573 /* compare remainder to the initial value */ 1574 return (xfer->flags_int.control_rem != UGETW(req.wLength)); 1575 } 1576 1577 /*------------------------------------------------------------------------* 1578 * usbd_setup_ctrl_transfer 1579 * 1580 * This function handles initialisation of control transfers. Control 1581 * transfers are special in that regard that they can both transmit 1582 * and receive data. 
1583 * 1584 * Return values: 1585 * 0: Success 1586 * Else: Failure 1587 *------------------------------------------------------------------------*/ 1588 static int 1589 usbd_setup_ctrl_transfer(struct usb_xfer *xfer) 1590 { 1591 usb_frlength_t len; 1592 1593 /* Check for control endpoint stall */ 1594 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) { 1595 /* the control transfer is no longer active */ 1596 xfer->flags_int.control_stall = 1; 1597 xfer->flags_int.control_act = 0; 1598 } else { 1599 /* don't stall control transfer by default */ 1600 xfer->flags_int.control_stall = 0; 1601 } 1602 1603 /* Check for invalid number of frames */ 1604 if (xfer->nframes > 2) { 1605 /* 1606 * If you need to split a control transfer, you 1607 * have to do one part at a time. Only with 1608 * non-control transfers you can do multiple 1609 * parts a time. 1610 */ 1611 DPRINTFN(0, "Too many frames: %u\n", 1612 (unsigned int)xfer->nframes); 1613 goto error; 1614 } 1615 1616 /* 1617 * Check if there is a control 1618 * transfer in progress: 1619 */ 1620 if (xfer->flags_int.control_act) { 1621 1622 if (xfer->flags_int.control_hdr) { 1623 1624 /* clear send header flag */ 1625 1626 xfer->flags_int.control_hdr = 0; 1627 1628 /* setup control transfer */ 1629 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1630 usbd_control_transfer_init(xfer); 1631 } 1632 } 1633 /* get data length */ 1634 1635 len = xfer->sumlen; 1636 1637 } else { 1638 1639 /* the size of the SETUP structure is hardcoded ! */ 1640 1641 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) { 1642 DPRINTFN(0, "Wrong framelength %u != %zu\n", 1643 xfer->frlengths[0], sizeof(struct 1644 usb_device_request)); 1645 goto error; 1646 } 1647 /* check USB mode */ 1648 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1649 1650 /* check number of frames */ 1651 if (xfer->nframes != 1) { 1652 /* 1653 * We need to receive the setup 1654 * message first so that we know the 1655 * data direction! 1656 */ 1657 DPRINTF("Misconfigured transfer\n"); 1658 goto error; 1659 } 1660 /* 1661 * Set a dummy "control_rem" value. This 1662 * variable will be overwritten later by a 1663 * call to "usbd_control_transfer_init()" ! 
1664 */ 1665 xfer->flags_int.control_rem = 0xFFFF; 1666 } else { 1667 1668 /* setup "endpoint" and "control_rem" */ 1669 1670 usbd_control_transfer_init(xfer); 1671 } 1672 1673 /* set transfer-header flag */ 1674 1675 xfer->flags_int.control_hdr = 1; 1676 1677 /* get data length */ 1678 1679 len = (xfer->sumlen - sizeof(struct usb_device_request)); 1680 } 1681 1682 /* update did data flag */ 1683 1684 xfer->flags_int.control_did_data = 1685 usbd_control_transfer_did_data(xfer); 1686 1687 /* check if there is a length mismatch */ 1688 1689 if (len > xfer->flags_int.control_rem) { 1690 DPRINTFN(0, "Length (%d) greater than " 1691 "remaining length (%d)\n", len, 1692 xfer->flags_int.control_rem); 1693 goto error; 1694 } 1695 /* check if we are doing a short transfer */ 1696 1697 if (xfer->flags.force_short_xfer) { 1698 xfer->flags_int.control_rem = 0; 1699 } else { 1700 if ((len != xfer->max_data_length) && 1701 (len != xfer->flags_int.control_rem) && 1702 (xfer->nframes != 1)) { 1703 DPRINTFN(0, "Short control transfer without " 1704 "force_short_xfer set\n"); 1705 goto error; 1706 } 1707 xfer->flags_int.control_rem -= len; 1708 } 1709 1710 /* the status part is executed when "control_act" is 0 */ 1711 1712 if ((xfer->flags_int.control_rem > 0) || 1713 (xfer->flags.manual_status)) { 1714 /* don't execute the STATUS stage yet */ 1715 xfer->flags_int.control_act = 1; 1716 1717 /* sanity check */ 1718 if ((!xfer->flags_int.control_hdr) && 1719 (xfer->nframes == 1)) { 1720 /* 1721 * This is not a valid operation! 1722 */ 1723 DPRINTFN(0, "Invalid parameter " 1724 "combination\n"); 1725 goto error; 1726 } 1727 } else { 1728 /* time to execute the STATUS stage */ 1729 xfer->flags_int.control_act = 0; 1730 } 1731 return (0); /* success */ 1732 1733 error: 1734 return (1); /* failure */ 1735 } 1736 1737 /*------------------------------------------------------------------------* 1738 * usbd_transfer_submit - start USB hardware for the given transfer 1739 * 1740 * This function should only be called from the USB callback. 1741 *------------------------------------------------------------------------*/ 1742 void 1743 usbd_transfer_submit(struct usb_xfer *xfer) 1744 { 1745 struct usb_xfer_root *info; 1746 struct usb_bus *bus; 1747 usb_frcount_t x; 1748 1749 info = xfer->xroot; 1750 bus = info->bus; 1751 1752 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n", 1753 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ? 1754 "read" : "write"); 1755 1756 #ifdef USB_DEBUG 1757 if (USB_DEBUG_VAR > 0) { 1758 USB_BUS_LOCK(bus); 1759 1760 usb_dump_endpoint(xfer->endpoint); 1761 1762 USB_BUS_UNLOCK(bus); 1763 } 1764 #endif 1765 1766 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1767 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED); 1768 1769 /* Only open the USB transfer once! 
*/ 1770 if (!xfer->flags_int.open) { 1771 xfer->flags_int.open = 1; 1772 1773 DPRINTF("open\n"); 1774 1775 USB_BUS_LOCK(bus); 1776 (xfer->endpoint->methods->open) (xfer); 1777 USB_BUS_UNLOCK(bus); 1778 } 1779 /* set "transferring" flag */ 1780 xfer->flags_int.transferring = 1; 1781 1782 #if USB_HAVE_POWERD 1783 /* increment power reference */ 1784 usbd_transfer_power_ref(xfer, 1); 1785 #endif 1786 /* 1787 * Check if the transfer is waiting on a queue, most 1788 * frequently the "done_q": 1789 */ 1790 if (xfer->wait_queue) { 1791 USB_BUS_LOCK(bus); 1792 usbd_transfer_dequeue(xfer); 1793 USB_BUS_UNLOCK(bus); 1794 } 1795 /* clear "did_dma_delay" flag */ 1796 xfer->flags_int.did_dma_delay = 0; 1797 1798 /* clear "did_close" flag */ 1799 xfer->flags_int.did_close = 0; 1800 1801 #if USB_HAVE_BUSDMA 1802 /* clear "bdma_setup" flag */ 1803 xfer->flags_int.bdma_setup = 0; 1804 #endif 1805 /* by default we cannot cancel any USB transfer immediately */ 1806 xfer->flags_int.can_cancel_immed = 0; 1807 1808 /* clear lengths and frame counts by default */ 1809 xfer->sumlen = 0; 1810 xfer->actlen = 0; 1811 xfer->aframes = 0; 1812 1813 /* clear any previous errors */ 1814 xfer->error = 0; 1815 1816 /* Check if the device is still alive */ 1817 if (info->udev->state < USB_STATE_POWERED) { 1818 USB_BUS_LOCK(bus); 1819 /* 1820 * Must return cancelled error code else 1821 * device drivers can hang. 1822 */ 1823 usbd_transfer_done(xfer, USB_ERR_CANCELLED); 1824 USB_BUS_UNLOCK(bus); 1825 return; 1826 } 1827 1828 /* sanity check */ 1829 if (xfer->nframes == 0) { 1830 if (xfer->flags.stall_pipe) { 1831 /* 1832 * Special case - want to stall without transferring 1833 * any data: 1834 */ 1835 DPRINTF("xfer=%p nframes=0: stall " 1836 "or clear stall!\n", xfer); 1837 USB_BUS_LOCK(bus); 1838 xfer->flags_int.can_cancel_immed = 1; 1839 /* start the transfer */ 1840 usb_command_wrapper(&xfer->endpoint-> 1841 endpoint_q[xfer->stream_id], xfer); 1842 USB_BUS_UNLOCK(bus); 1843 return; 1844 } 1845 USB_BUS_LOCK(bus); 1846 usbd_transfer_done(xfer, USB_ERR_INVAL); 1847 USB_BUS_UNLOCK(bus); 1848 return; 1849 } 1850 /* compute some variables */ 1851 1852 for (x = 0; x != xfer->nframes; x++) { 1853 /* make a copy of the frlenghts[] */ 1854 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x]; 1855 /* compute total transfer length */ 1856 xfer->sumlen += xfer->frlengths[x]; 1857 if (xfer->sumlen < xfer->frlengths[x]) { 1858 /* length wrapped around */ 1859 USB_BUS_LOCK(bus); 1860 usbd_transfer_done(xfer, USB_ERR_INVAL); 1861 USB_BUS_UNLOCK(bus); 1862 return; 1863 } 1864 } 1865 1866 /* clear some internal flags */ 1867 1868 xfer->flags_int.short_xfer_ok = 0; 1869 xfer->flags_int.short_frames_ok = 0; 1870 1871 /* check if this is a control transfer */ 1872 1873 if (xfer->flags_int.control_xfr) { 1874 1875 if (usbd_setup_ctrl_transfer(xfer)) { 1876 USB_BUS_LOCK(bus); 1877 usbd_transfer_done(xfer, USB_ERR_STALLED); 1878 USB_BUS_UNLOCK(bus); 1879 return; 1880 } 1881 } 1882 /* 1883 * Setup filtered version of some transfer flags, 1884 * in case of data read direction 1885 */ 1886 if (USB_GET_DATA_ISREAD(xfer)) { 1887 1888 if (xfer->flags.short_frames_ok) { 1889 xfer->flags_int.short_xfer_ok = 1; 1890 xfer->flags_int.short_frames_ok = 1; 1891 } else if (xfer->flags.short_xfer_ok) { 1892 xfer->flags_int.short_xfer_ok = 1; 1893 1894 /* check for control transfer */ 1895 if (xfer->flags_int.control_xfr) { 1896 /* 1897 * 1) Control transfers do not support 1898 * reception of multiple short USB 1899 * frames in host mode and device 
side 1900 * mode, with exception of: 1901 * 1902 * 2) Due to sometimes buggy device 1903 * side firmware we need to do a 1904 * STATUS stage in case of short 1905 * control transfers in USB host mode. 1906 * The STATUS stage then becomes the 1907 * "alt_next" to the DATA stage. 1908 */ 1909 xfer->flags_int.short_frames_ok = 1; 1910 } 1911 } 1912 } 1913 /* 1914 * Check if BUS-DMA support is enabled and try to load virtual 1915 * buffers into DMA, if any: 1916 */ 1917 #if USB_HAVE_BUSDMA 1918 if (xfer->flags_int.bdma_enable) { 1919 /* insert the USB transfer last in the BUS-DMA queue */ 1920 usb_command_wrapper(&xfer->xroot->dma_q, xfer); 1921 return; 1922 } 1923 #endif 1924 /* 1925 * Enter the USB transfer into the Host Controller or 1926 * Device Controller schedule: 1927 */ 1928 usbd_pipe_enter(xfer); 1929 } 1930 1931 /*------------------------------------------------------------------------* 1932 * usbd_pipe_enter - factored out code 1933 *------------------------------------------------------------------------*/ 1934 void 1935 usbd_pipe_enter(struct usb_xfer *xfer) 1936 { 1937 struct usb_endpoint *ep; 1938 1939 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1940 1941 USB_BUS_LOCK(xfer->xroot->bus); 1942 1943 ep = xfer->endpoint; 1944 1945 DPRINTF("enter\n"); 1946 1947 /* the transfer can now be cancelled */ 1948 xfer->flags_int.can_cancel_immed = 1; 1949 1950 /* enter the transfer */ 1951 (ep->methods->enter) (xfer); 1952 1953 /* check for transfer error */ 1954 if (xfer->error) { 1955 /* some error has happened */ 1956 usbd_transfer_done(xfer, 0); 1957 USB_BUS_UNLOCK(xfer->xroot->bus); 1958 return; 1959 } 1960 1961 /* start the transfer */ 1962 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer); 1963 USB_BUS_UNLOCK(xfer->xroot->bus); 1964 } 1965 1966 /*------------------------------------------------------------------------* 1967 * usbd_transfer_start - start an USB transfer 1968 * 1969 * NOTE: Calling this function more than one time will only 1970 * result in a single transfer start, until the USB transfer 1971 * completes. 1972 *------------------------------------------------------------------------*/ 1973 void 1974 usbd_transfer_start(struct usb_xfer *xfer) 1975 { 1976 if (xfer == NULL) { 1977 /* transfer is gone */ 1978 return; 1979 } 1980 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1981 1982 /* mark the USB transfer started */ 1983 1984 if (!xfer->flags_int.started) { 1985 /* lock the BUS lock to avoid races updating flags_int */ 1986 USB_BUS_LOCK(xfer->xroot->bus); 1987 xfer->flags_int.started = 1; 1988 USB_BUS_UNLOCK(xfer->xroot->bus); 1989 } 1990 /* check if the USB transfer callback is already transferring */ 1991 1992 if (xfer->flags_int.transferring) { 1993 return; 1994 } 1995 USB_BUS_LOCK(xfer->xroot->bus); 1996 /* call the USB transfer callback */ 1997 usbd_callback_ss_done_defer(xfer); 1998 USB_BUS_UNLOCK(xfer->xroot->bus); 1999 } 2000 2001 /*------------------------------------------------------------------------* 2002 * usbd_transfer_stop - stop an USB transfer 2003 * 2004 * NOTE: Calling this function more than one time will only 2005 * result in a single transfer stop. 2006 * NOTE: When this function returns it is not safe to free nor 2007 * reuse any DMA buffers. See "usbd_transfer_drain()". 
2008 *------------------------------------------------------------------------*/ 2009 void 2010 usbd_transfer_stop(struct usb_xfer *xfer) 2011 { 2012 struct usb_endpoint *ep; 2013 2014 if (xfer == NULL) { 2015 /* transfer is gone */ 2016 return; 2017 } 2018 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2019 2020 /* check if the USB transfer was ever opened */ 2021 2022 if (!xfer->flags_int.open) { 2023 if (xfer->flags_int.started) { 2024 /* nothing to do except clearing the "started" flag */ 2025 /* lock the BUS lock to avoid races updating flags_int */ 2026 USB_BUS_LOCK(xfer->xroot->bus); 2027 xfer->flags_int.started = 0; 2028 USB_BUS_UNLOCK(xfer->xroot->bus); 2029 } 2030 return; 2031 } 2032 /* try to stop the current USB transfer */ 2033 2034 USB_BUS_LOCK(xfer->xroot->bus); 2035 /* override any previous error */ 2036 xfer->error = USB_ERR_CANCELLED; 2037 2038 /* 2039 * Clear "open" and "started" when both private and USB lock 2040 * is locked so that we don't get a race updating "flags_int" 2041 */ 2042 xfer->flags_int.open = 0; 2043 xfer->flags_int.started = 0; 2044 2045 /* 2046 * Check if we can cancel the USB transfer immediately. 2047 */ 2048 if (xfer->flags_int.transferring) { 2049 if (xfer->flags_int.can_cancel_immed && 2050 (!xfer->flags_int.did_close)) { 2051 DPRINTF("close\n"); 2052 /* 2053 * The following will lead to an USB_ERR_CANCELLED 2054 * error code being passed to the USB callback. 2055 */ 2056 (xfer->endpoint->methods->close) (xfer); 2057 /* only close once */ 2058 xfer->flags_int.did_close = 1; 2059 } else { 2060 /* need to wait for the next done callback */ 2061 } 2062 } else { 2063 DPRINTF("close\n"); 2064 2065 /* close here and now */ 2066 (xfer->endpoint->methods->close) (xfer); 2067 2068 /* 2069 * Any additional DMA delay is done by 2070 * "usbd_transfer_unsetup()". 2071 */ 2072 2073 /* 2074 * Special case. Check if we need to restart a blocked 2075 * endpoint. 2076 */ 2077 ep = xfer->endpoint; 2078 2079 /* 2080 * If the current USB transfer is completing we need 2081 * to start the next one: 2082 */ 2083 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 2084 usb_command_wrapper( 2085 &ep->endpoint_q[xfer->stream_id], NULL); 2086 } 2087 } 2088 2089 USB_BUS_UNLOCK(xfer->xroot->bus); 2090 } 2091 2092 /*------------------------------------------------------------------------* 2093 * usbd_transfer_pending 2094 * 2095 * This function will check if an USB transfer is pending which is a 2096 * little bit complicated! 2097 * Return values: 2098 * 0: Not pending 2099 * 1: Pending: The USB transfer will receive a callback in the future. 
2100 *------------------------------------------------------------------------*/ 2101 uint8_t 2102 usbd_transfer_pending(struct usb_xfer *xfer) 2103 { 2104 struct usb_xfer_root *info; 2105 struct usb_xfer_queue *pq; 2106 2107 if (xfer == NULL) { 2108 /* transfer is gone */ 2109 return (0); 2110 } 2111 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2112 2113 if (xfer->flags_int.transferring) { 2114 /* trivial case */ 2115 return (1); 2116 } 2117 USB_BUS_LOCK(xfer->xroot->bus); 2118 if (xfer->wait_queue) { 2119 /* we are waiting on a queue somewhere */ 2120 USB_BUS_UNLOCK(xfer->xroot->bus); 2121 return (1); 2122 } 2123 info = xfer->xroot; 2124 pq = &info->done_q; 2125 2126 if (pq->curr == xfer) { 2127 /* we are currently scheduled for callback */ 2128 USB_BUS_UNLOCK(xfer->xroot->bus); 2129 return (1); 2130 } 2131 /* we are not pending */ 2132 USB_BUS_UNLOCK(xfer->xroot->bus); 2133 return (0); 2134 } 2135 2136 /*------------------------------------------------------------------------* 2137 * usbd_transfer_drain 2138 * 2139 * This function will stop the USB transfer and wait for any 2140 * additional BUS-DMA and HW-DMA operations to complete. Buffers that 2141 * are loaded into DMA can safely be freed or reused after that this 2142 * function has returned. 2143 *------------------------------------------------------------------------*/ 2144 void 2145 usbd_transfer_drain(struct usb_xfer *xfer) 2146 { 2147 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2148 "usbd_transfer_drain can sleep!"); 2149 2150 if (xfer == NULL) { 2151 /* transfer is gone */ 2152 return; 2153 } 2154 if (xfer->xroot->xfer_mtx != &Giant) { 2155 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED); 2156 } 2157 USB_XFER_LOCK(xfer); 2158 2159 usbd_transfer_stop(xfer); 2160 2161 while (usbd_transfer_pending(xfer) || 2162 xfer->flags_int.doing_callback) { 2163 2164 /* 2165 * It is allowed that the callback can drop its 2166 * transfer mutex. In that case checking only 2167 * "usbd_transfer_pending()" is not enough to tell if 2168 * the USB transfer is fully drained. We also need to 2169 * check the internal "doing_callback" flag. 2170 */ 2171 xfer->flags_int.draining = 1; 2172 2173 /* 2174 * Wait until the current outstanding USB 2175 * transfer is complete ! 2176 */ 2177 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx); 2178 } 2179 USB_XFER_UNLOCK(xfer); 2180 } 2181 2182 struct usb_page_cache * 2183 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex) 2184 { 2185 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2186 2187 return (&xfer->frbuffers[frindex]); 2188 } 2189 2190 void * 2191 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex) 2192 { 2193 struct usb_page_search page_info; 2194 2195 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2196 2197 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info); 2198 return (page_info.buffer); 2199 } 2200 2201 /*------------------------------------------------------------------------* 2202 * usbd_xfer_get_fps_shift 2203 * 2204 * The following function is only useful for isochronous transfers. It 2205 * returns how many times the frame execution rate has been shifted 2206 * down. 
2207 *
2208 * Return value:
2209 * Success: 0..3
2210 * Failure: 0
2211 *------------------------------------------------------------------------*/
2212 uint8_t
2213 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2214 {
2215 return (xfer->fps_shift);
2216 }
2217
2218 usb_frlength_t
2219 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2220 {
2221 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2222
2223 return (xfer->frlengths[frindex]);
2224 }
2225
2226 /*------------------------------------------------------------------------*
2227 * usbd_xfer_set_frame_data
2228 *
2229 * This function sets the pointer of the buffer that should
2230 * be loaded directly into DMA for the given USB frame. Passing "ptr"
2231 * equal to NULL while the corresponding "frlength" is greater
2232 * than zero gives undefined results!
2233 *------------------------------------------------------------------------*/
2234 void
2235 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2236 void *ptr, usb_frlength_t len)
2237 {
2238 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2239
2240 /* set virtual address to load and length */
2241 xfer->frbuffers[frindex].buffer = ptr;
2242 usbd_xfer_set_frame_len(xfer, frindex, len);
2243 }
2244
2245 void
2246 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2247 void **ptr, int *len)
2248 {
2249 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2250
2251 if (ptr != NULL)
2252 *ptr = xfer->frbuffers[frindex].buffer;
2253 if (len != NULL)
2254 *len = xfer->frlengths[frindex];
2255 }
2256
2257 /*------------------------------------------------------------------------*
2258 * usbd_xfer_old_frame_length
2259 *
2260 * This function returns the frame length of the given frame at the
2261 * time the transfer was submitted. This function can be used to
2262 * compute the starting data pointer of the next isochronous frame
2263 * when an isochronous transfer has completed.
2264 *------------------------------------------------------------------------*/
2265 usb_frlength_t
2266 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2267 {
2268 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2269
2270 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2271 }
2272
2273 void
2274 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2275 int *nframes)
2276 {
2277 if (actlen != NULL)
2278 *actlen = xfer->actlen;
2279 if (sumlen != NULL)
2280 *sumlen = xfer->sumlen;
2281 if (aframes != NULL)
2282 *aframes = xfer->aframes;
2283 if (nframes != NULL)
2284 *nframes = xfer->nframes;
2285 }
2286
2287 /*------------------------------------------------------------------------*
2288 * usbd_xfer_set_frame_offset
2289 *
2290 * This function sets the frame data buffer offset relative to the beginning
2291 * of the USB DMA buffer allocated for this USB transfer.
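 *
 * Usage sketch (hypothetical, not taken from this file): a control
 * transfer that keeps the request and the data stage in one local
 * buffer can place its two frames like this, provided the transfer
 * was set up without an external buffer:
 *
 *     usbd_xfer_set_frame_offset(xfer, 0, 0);
 *     usbd_xfer_set_frame_offset(xfer,
 *         sizeof(struct usb_device_request), 1);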
2292 *------------------------------------------------------------------------*/ 2293 void 2294 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset, 2295 usb_frcount_t frindex) 2296 { 2297 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame " 2298 "when the USB buffer is external\n")); 2299 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2300 2301 /* set virtual address to load */ 2302 xfer->frbuffers[frindex].buffer = 2303 USB_ADD_BYTES(xfer->local_buffer, offset); 2304 } 2305 2306 void 2307 usbd_xfer_set_interval(struct usb_xfer *xfer, int i) 2308 { 2309 xfer->interval = i; 2310 } 2311 2312 void 2313 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t) 2314 { 2315 xfer->timeout = t; 2316 } 2317 2318 void 2319 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n) 2320 { 2321 xfer->nframes = n; 2322 } 2323 2324 usb_frcount_t 2325 usbd_xfer_max_frames(struct usb_xfer *xfer) 2326 { 2327 return (xfer->max_frame_count); 2328 } 2329 2330 usb_frlength_t 2331 usbd_xfer_max_len(struct usb_xfer *xfer) 2332 { 2333 return (xfer->max_data_length); 2334 } 2335 2336 usb_frlength_t 2337 usbd_xfer_max_framelen(struct usb_xfer *xfer) 2338 { 2339 return (xfer->max_frame_size); 2340 } 2341 2342 void 2343 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex, 2344 usb_frlength_t len) 2345 { 2346 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2347 2348 xfer->frlengths[frindex] = len; 2349 } 2350 2351 /*------------------------------------------------------------------------* 2352 * usb_callback_proc - factored out code 2353 * 2354 * This function performs USB callbacks. 2355 *------------------------------------------------------------------------*/ 2356 static void 2357 usb_callback_proc(struct usb_proc_msg *_pm) 2358 { 2359 struct usb_done_msg *pm = (void *)_pm; 2360 struct usb_xfer_root *info = pm->xroot; 2361 2362 /* Change locking order */ 2363 USB_BUS_UNLOCK(info->bus); 2364 2365 /* 2366 * We exploit the fact that the mutex is the same for all 2367 * callbacks that will be called from this thread: 2368 */ 2369 USB_MTX_LOCK(info->xfer_mtx); 2370 USB_BUS_LOCK(info->bus); 2371 2372 /* Continue where we lost track */ 2373 usb_command_wrapper(&info->done_q, 2374 info->done_q.curr); 2375 2376 USB_MTX_UNLOCK(info->xfer_mtx); 2377 } 2378 2379 /*------------------------------------------------------------------------* 2380 * usbd_callback_ss_done_defer 2381 * 2382 * This function will defer the start, stop and done callback to the 2383 * correct thread. 2384 *------------------------------------------------------------------------*/ 2385 static void 2386 usbd_callback_ss_done_defer(struct usb_xfer *xfer) 2387 { 2388 struct usb_xfer_root *info = xfer->xroot; 2389 struct usb_xfer_queue *pq = &info->done_q; 2390 2391 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2392 2393 if (pq->curr != xfer) { 2394 usbd_transfer_enqueue(pq, xfer); 2395 } 2396 if (!pq->recurse_1) { 2397 2398 /* 2399 * We have to postpone the callback due to the fact we 2400 * will have a Lock Order Reversal, LOR, if we try to 2401 * proceed ! 2402 */ 2403 (void) usb_proc_msignal(info->done_p, 2404 &info->done_m[0], &info->done_m[1]); 2405 } else { 2406 /* clear second recurse flag */ 2407 pq->recurse_2 = 0; 2408 } 2409 return; 2410 2411 } 2412 2413 /*------------------------------------------------------------------------* 2414 * usbd_callback_wrapper 2415 * 2416 * This is a wrapper for USB callbacks. 
This wrapper does some 2417 * auto-magic things like figuring out if we can call the callback 2418 * directly from the current context or if we need to wakeup the 2419 * interrupt process. 2420 *------------------------------------------------------------------------*/ 2421 static void 2422 usbd_callback_wrapper(struct usb_xfer_queue *pq) 2423 { 2424 struct usb_xfer *xfer = pq->curr; 2425 struct usb_xfer_root *info = xfer->xroot; 2426 2427 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 2428 if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) && 2429 USB_IN_POLLING_MODE_FUNC() == 0) { 2430 /* 2431 * Cases that end up here: 2432 * 2433 * 5) HW interrupt done callback or other source. 2434 * 6) HW completed transfer during callback 2435 */ 2436 DPRINTFN(3, "case 5 and 6\n"); 2437 2438 /* 2439 * We have to postpone the callback due to the fact we 2440 * will have a Lock Order Reversal, LOR, if we try to 2441 * proceed! 2442 * 2443 * Postponing the callback also ensures that other USB 2444 * transfer queues get a chance. 2445 */ 2446 (void) usb_proc_msignal(info->done_p, 2447 &info->done_m[0], &info->done_m[1]); 2448 return; 2449 } 2450 /* 2451 * Cases that end up here: 2452 * 2453 * 1) We are starting a transfer 2454 * 2) We are prematurely calling back a transfer 2455 * 3) We are stopping a transfer 2456 * 4) We are doing an ordinary callback 2457 */ 2458 DPRINTFN(3, "case 1-4\n"); 2459 /* get next USB transfer in the queue */ 2460 info->done_q.curr = NULL; 2461 2462 /* set flag in case of drain */ 2463 xfer->flags_int.doing_callback = 1; 2464 2465 USB_BUS_UNLOCK(info->bus); 2466 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED); 2467 2468 /* set correct USB state for callback */ 2469 if (!xfer->flags_int.transferring) { 2470 xfer->usb_state = USB_ST_SETUP; 2471 if (!xfer->flags_int.started) { 2472 /* we got stopped before we even got started */ 2473 USB_BUS_LOCK(info->bus); 2474 goto done; 2475 } 2476 } else { 2477 2478 if (usbd_callback_wrapper_sub(xfer)) { 2479 /* the callback has been deferred */ 2480 USB_BUS_LOCK(info->bus); 2481 goto done; 2482 } 2483 #if USB_HAVE_POWERD 2484 /* decrement power reference */ 2485 usbd_transfer_power_ref(xfer, -1); 2486 #endif 2487 xfer->flags_int.transferring = 0; 2488 2489 if (xfer->error) { 2490 xfer->usb_state = USB_ST_ERROR; 2491 } else { 2492 /* set transferred state */ 2493 xfer->usb_state = USB_ST_TRANSFERRED; 2494 #if USB_HAVE_BUSDMA 2495 /* sync DMA memory, if any */ 2496 if (xfer->flags_int.bdma_enable && 2497 (!xfer->flags_int.bdma_no_post_sync)) { 2498 usb_bdma_post_sync(xfer); 2499 } 2500 #endif 2501 } 2502 } 2503 2504 #if USB_HAVE_PF 2505 if (xfer->usb_state != USB_ST_SETUP) { 2506 USB_BUS_LOCK(info->bus); 2507 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE); 2508 USB_BUS_UNLOCK(info->bus); 2509 } 2510 #endif 2511 /* call processing routine */ 2512 (xfer->callback) (xfer, xfer->error); 2513 2514 /* pickup the USB mutex again */ 2515 USB_BUS_LOCK(info->bus); 2516 2517 /* 2518 * Check if we got started after that we got cancelled, but 2519 * before we managed to do the callback. 2520 */ 2521 if ((!xfer->flags_int.open) && 2522 (xfer->flags_int.started) && 2523 (xfer->usb_state == USB_ST_ERROR)) { 2524 /* clear flag in case of drain */ 2525 xfer->flags_int.doing_callback = 0; 2526 /* try to loop, but not recursivly */ 2527 usb_command_wrapper(&info->done_q, xfer); 2528 return; 2529 } 2530 2531 done: 2532 /* clear flag in case of drain */ 2533 xfer->flags_int.doing_callback = 0; 2534 2535 /* 2536 * Check if we are draining. 
2537 */
2538 if (xfer->flags_int.draining &&
2539 (!xfer->flags_int.transferring)) {
2540 /* "usbd_transfer_drain()" is waiting for end of transfer */
2541 xfer->flags_int.draining = 0;
2542 cv_broadcast(&info->cv_drain);
2543 }
2544
2545 /* do the next callback, if any */
2546 usb_command_wrapper(&info->done_q,
2547 info->done_q.curr);
2548 }
2549
2550 /*------------------------------------------------------------------------*
2551 * usb_dma_delay_done_cb
2552 *
2553 * This function is called when the DMA delay has been executed, and
2554 * will make sure that the callback is called to complete the USB
2555 * transfer. This code path is usually only used when there is an USB
2556 * error like USB_ERR_CANCELLED.
2557 *------------------------------------------------------------------------*/
2558 void
2559 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2560 {
2561 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2562
2563 DPRINTFN(3, "Completed %p\n", xfer);
2564
2565 /* queue callback for execution, again */
2566 usbd_transfer_done(xfer, 0);
2567 }
2568
2569 /*------------------------------------------------------------------------*
2570 * usbd_transfer_dequeue
2571 *
2572 * - This function is used to remove an USB transfer from a USB
2573 * transfer queue.
2574 *
2575 * - This function can be called multiple times in a row.
2576 *------------------------------------------------------------------------*/
2577 void
2578 usbd_transfer_dequeue(struct usb_xfer *xfer)
2579 {
2580 struct usb_xfer_queue *pq;
2581
2582 pq = xfer->wait_queue;
2583 if (pq) {
2584 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2585 xfer->wait_queue = NULL;
2586 }
2587 }
2588
2589 /*------------------------------------------------------------------------*
2590 * usbd_transfer_enqueue
2591 *
2592 * - This function is used to insert an USB transfer into a USB
2593 * transfer queue.
2594 *
2595 * - This function can be called multiple times in a row.
2596 *------------------------------------------------------------------------*/
2597 void
2598 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2599 {
2600 /*
2601 * Insert the USB transfer into the queue, if it is not
2602 * already on a USB transfer queue:
2603 */
2604 if (xfer->wait_queue == NULL) {
2605 xfer->wait_queue = pq;
2606 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2607 }
2608 }
2609
2610 /*------------------------------------------------------------------------*
2611 * usbd_transfer_done
2612 *
2613 * - This function is used to remove an USB transfer from the busdma,
2614 * pipe or interrupt queue.
2615 *
2616 * - This function is used to queue the USB transfer on the done
2617 * queue.
2618 *
2619 * - This function is used to stop any USB transfer timeouts.
2620 *------------------------------------------------------------------------*/
2621 void
2622 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2623 {
2624 struct usb_xfer_root *info = xfer->xroot;
2625
2626 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2627
2628 DPRINTF("err=%s\n", usbd_errstr(error));
2629
2630 /*
2631 * If we are not transferring then just return.
2632 * This can happen during transfer cancel.
2633 */
2634 if (!xfer->flags_int.transferring) {
2635 DPRINTF("not transferring\n");
2636 /* end of control transfer, if any */
2637 xfer->flags_int.control_act = 0;
2638 return;
2639 }
2640 /* only set transfer error, if not already set */
2641 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2642 xfer->error = error;
2643
2644 /* stop any callouts */
2645 usb_callout_stop(&xfer->timeout_handle);
2646
2647 /*
2648 * If we are waiting on a queue, just remove the USB transfer
2649 * from the queue, if any. We should have the required locks
2650 * locked to do the remove when this function is called.
2651 */
2652 usbd_transfer_dequeue(xfer);
2653
2654 #if USB_HAVE_BUSDMA
2655 if (mtx_owned(info->xfer_mtx)) {
2656 struct usb_xfer_queue *pq;
2657
2658 /*
2659 * If the private USB lock is not locked, then we assume
2660 * that the BUS-DMA load stage has been passed:
2661 */
2662 pq = &info->dma_q;
2663
2664 if (pq->curr == xfer) {
2665 /* start the next BUS-DMA load, if any */
2666 usb_command_wrapper(pq, NULL);
2667 }
2668 }
2669 #endif
2670 /* keep some statistics */
2671 if (xfer->error == USB_ERR_CANCELLED) {
2672 info->udev->stats_cancelled.uds_requests
2673 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2674 } else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2675 info->udev->stats_err.uds_requests
2676 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2677 } else {
2678 info->udev->stats_ok.uds_requests
2679 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2680 }
2681
2682 /* call the USB transfer callback */
2683 usbd_callback_ss_done_defer(xfer);
2684 }
2685
2686 /*------------------------------------------------------------------------*
2687 * usbd_transfer_start_cb
2688 *
2689 * This function is called to start the USB transfer when
2690 * "xfer->interval" is greater than zero, and the endpoint type is
2691 * BULK or CONTROL.
2692 *------------------------------------------------------------------------*/
2693 static void
2694 usbd_transfer_start_cb(void *arg)
2695 {
2696 struct usb_xfer *xfer = arg;
2697 struct usb_endpoint *ep = xfer->endpoint;
2698
2699 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2700
2701 DPRINTF("start\n");
2702
2703 #if USB_HAVE_PF
2704 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2705 #endif
2706
2707 /* the transfer can now be cancelled */
2708 xfer->flags_int.can_cancel_immed = 1;
2709
2710 /* start USB transfer, if no error */
2711 if (xfer->error == 0)
2712 (ep->methods->start) (xfer);
2713
2714 /* check for transfer error */
2715 if (xfer->error) {
2716 /* some error has happened */
2717 usbd_transfer_done(xfer, 0);
2718 }
2719 }
2720
2721 /*------------------------------------------------------------------------*
2722 * usbd_xfer_set_stall
2723 *
2724 * This function is used to set the stall flag outside the
2725 * callback. This function is NULL safe.
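 *
 * Usage sketch (hypothetical, not taken from this file; called with
 * the transfer mutex held, outside the transfer callback):
 *
 *     usbd_xfer_set_stall(sc->sc_xfer);
 *     usbd_transfer_start(sc->sc_xfer);  // stall is handled on next start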
2726 *------------------------------------------------------------------------*/ 2727 void 2728 usbd_xfer_set_stall(struct usb_xfer *xfer) 2729 { 2730 if (xfer == NULL) { 2731 /* tearing down */ 2732 return; 2733 } 2734 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2735 2736 /* avoid any races by locking the USB mutex */ 2737 USB_BUS_LOCK(xfer->xroot->bus); 2738 xfer->flags.stall_pipe = 1; 2739 USB_BUS_UNLOCK(xfer->xroot->bus); 2740 } 2741 2742 int 2743 usbd_xfer_is_stalled(struct usb_xfer *xfer) 2744 { 2745 return (xfer->endpoint->is_stalled); 2746 } 2747 2748 /*------------------------------------------------------------------------* 2749 * usbd_transfer_clear_stall 2750 * 2751 * This function is used to clear the stall flag outside the 2752 * callback. This function is NULL safe. 2753 *------------------------------------------------------------------------*/ 2754 void 2755 usbd_transfer_clear_stall(struct usb_xfer *xfer) 2756 { 2757 if (xfer == NULL) { 2758 /* tearing down */ 2759 return; 2760 } 2761 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2762 2763 /* avoid any races by locking the USB mutex */ 2764 USB_BUS_LOCK(xfer->xroot->bus); 2765 2766 xfer->flags.stall_pipe = 0; 2767 2768 USB_BUS_UNLOCK(xfer->xroot->bus); 2769 } 2770 2771 /*------------------------------------------------------------------------* 2772 * usbd_pipe_start 2773 * 2774 * This function is used to add an USB transfer to the pipe transfer list. 2775 *------------------------------------------------------------------------*/ 2776 void 2777 usbd_pipe_start(struct usb_xfer_queue *pq) 2778 { 2779 struct usb_endpoint *ep; 2780 struct usb_xfer *xfer; 2781 uint8_t type; 2782 2783 xfer = pq->curr; 2784 ep = xfer->endpoint; 2785 2786 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2787 2788 /* 2789 * If the endpoint is already stalled we do nothing ! 2790 */ 2791 if (ep->is_stalled) { 2792 return; 2793 } 2794 /* 2795 * Check if we are supposed to stall the endpoint: 2796 */ 2797 if (xfer->flags.stall_pipe) { 2798 struct usb_device *udev; 2799 struct usb_xfer_root *info; 2800 2801 /* clear stall command */ 2802 xfer->flags.stall_pipe = 0; 2803 2804 /* get pointer to USB device */ 2805 info = xfer->xroot; 2806 udev = info->udev; 2807 2808 /* 2809 * Only stall BULK and INTERRUPT endpoints. 2810 */ 2811 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2812 if ((type == UE_BULK) || 2813 (type == UE_INTERRUPT)) { 2814 uint8_t did_stall; 2815 2816 did_stall = 1; 2817 2818 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2819 (udev->bus->methods->set_stall) ( 2820 udev, ep, &did_stall); 2821 } else if (udev->ctrl_xfer[1]) { 2822 info = udev->ctrl_xfer[1]->xroot; 2823 usb_proc_msignal( 2824 USB_BUS_CS_PROC(info->bus), 2825 &udev->cs_msg[0], &udev->cs_msg[1]); 2826 } else { 2827 /* should not happen */ 2828 DPRINTFN(0, "No stall handler\n"); 2829 } 2830 /* 2831 * Check if we should stall. Some USB hardware 2832 * handles set- and clear-stall in hardware. 2833 */ 2834 if (did_stall) { 2835 /* 2836 * The transfer will be continued when 2837 * the clear-stall control endpoint 2838 * message is received. 2839 */ 2840 ep->is_stalled = 1; 2841 return; 2842 } 2843 } else if (type == UE_ISOCHRONOUS) { 2844 2845 /* 2846 * Make sure any FIFO overflow or other FIFO 2847 * error conditions go away by resetting the 2848 * endpoint FIFO through the clear stall 2849 * method. 
2850 */ 2851 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2852 (udev->bus->methods->clear_stall) (udev, ep); 2853 } 2854 } 2855 } 2856 /* Set or clear stall complete - special case */ 2857 if (xfer->nframes == 0) { 2858 /* we are complete */ 2859 xfer->aframes = 0; 2860 usbd_transfer_done(xfer, 0); 2861 return; 2862 } 2863 /* 2864 * Handled cases: 2865 * 2866 * 1) Start the first transfer queued. 2867 * 2868 * 2) Re-start the current USB transfer. 2869 */ 2870 /* 2871 * Check if there should be any 2872 * pre transfer start delay: 2873 */ 2874 if (xfer->interval > 0) { 2875 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2876 if ((type == UE_BULK) || 2877 (type == UE_CONTROL)) { 2878 usbd_transfer_timeout_ms(xfer, 2879 &usbd_transfer_start_cb, 2880 xfer->interval); 2881 return; 2882 } 2883 } 2884 DPRINTF("start\n"); 2885 2886 #if USB_HAVE_PF 2887 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); 2888 #endif 2889 /* the transfer can now be cancelled */ 2890 xfer->flags_int.can_cancel_immed = 1; 2891 2892 /* start USB transfer, if no error */ 2893 if (xfer->error == 0) 2894 (ep->methods->start) (xfer); 2895 2896 /* check for transfer error */ 2897 if (xfer->error) { 2898 /* some error has happened */ 2899 usbd_transfer_done(xfer, 0); 2900 } 2901 } 2902 2903 /*------------------------------------------------------------------------* 2904 * usbd_transfer_timeout_ms 2905 * 2906 * This function is used to setup a timeout on the given USB 2907 * transfer. If the timeout has been deferred the callback given by 2908 * "cb" will get called after "ms" milliseconds. 2909 *------------------------------------------------------------------------*/ 2910 void 2911 usbd_transfer_timeout_ms(struct usb_xfer *xfer, 2912 void (*cb) (void *arg), usb_timeout_t ms) 2913 { 2914 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2915 2916 /* defer delay */ 2917 usb_callout_reset(&xfer->timeout_handle, 2918 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer); 2919 } 2920 2921 /*------------------------------------------------------------------------* 2922 * usbd_callback_wrapper_sub 2923 * 2924 * - This function will update variables in an USB transfer after 2925 * that the USB transfer is complete. 2926 * 2927 * - This function is used to start the next USB transfer on the 2928 * ep transfer queue, if any. 2929 * 2930 * NOTE: In some special cases the USB transfer will not be removed from 2931 * the pipe queue, but remain first. To enforce USB transfer removal call 2932 * this function passing the error code "USB_ERR_CANCELLED". 2933 * 2934 * Return values: 2935 * 0: Success. 2936 * Else: The callback has been deferred. 2937 *------------------------------------------------------------------------*/ 2938 static uint8_t 2939 usbd_callback_wrapper_sub(struct usb_xfer *xfer) 2940 { 2941 struct usb_endpoint *ep; 2942 struct usb_bus *bus; 2943 usb_frcount_t x; 2944 2945 bus = xfer->xroot->bus; 2946 2947 if ((!xfer->flags_int.open) && 2948 (!xfer->flags_int.did_close)) { 2949 DPRINTF("close\n"); 2950 USB_BUS_LOCK(bus); 2951 (xfer->endpoint->methods->close) (xfer); 2952 USB_BUS_UNLOCK(bus); 2953 /* only close once */ 2954 xfer->flags_int.did_close = 1; 2955 return (1); /* wait for new callback */ 2956 } 2957 /* 2958 * If we have a non-hardware induced error we 2959 * need to do the DMA delay! 
2960 */ 2961 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay && 2962 (xfer->error == USB_ERR_CANCELLED || 2963 xfer->error == USB_ERR_TIMEOUT || 2964 bus->methods->start_dma_delay != NULL)) { 2965 2966 usb_timeout_t temp; 2967 2968 /* only delay once */ 2969 xfer->flags_int.did_dma_delay = 1; 2970 2971 /* we can not cancel this delay */ 2972 xfer->flags_int.can_cancel_immed = 0; 2973 2974 temp = usbd_get_dma_delay(xfer->xroot->udev); 2975 2976 DPRINTFN(3, "DMA delay, %u ms, " 2977 "on %p\n", temp, xfer); 2978 2979 if (temp != 0) { 2980 USB_BUS_LOCK(bus); 2981 /* 2982 * Some hardware solutions have dedicated 2983 * events when it is safe to free DMA'ed 2984 * memory. For the other hardware platforms we 2985 * use a static delay. 2986 */ 2987 if (bus->methods->start_dma_delay != NULL) { 2988 (bus->methods->start_dma_delay) (xfer); 2989 } else { 2990 usbd_transfer_timeout_ms(xfer, 2991 (void (*)(void *))&usb_dma_delay_done_cb, 2992 temp); 2993 } 2994 USB_BUS_UNLOCK(bus); 2995 return (1); /* wait for new callback */ 2996 } 2997 } 2998 /* check actual number of frames */ 2999 if (xfer->aframes > xfer->nframes) { 3000 if (xfer->error == 0) { 3001 panic("%s: actual number of frames, %d, is " 3002 "greater than initial number of frames, %d\n", 3003 __FUNCTION__, xfer->aframes, xfer->nframes); 3004 } else { 3005 /* just set some valid value */ 3006 xfer->aframes = xfer->nframes; 3007 } 3008 } 3009 /* compute actual length */ 3010 xfer->actlen = 0; 3011 3012 for (x = 0; x != xfer->aframes; x++) { 3013 xfer->actlen += xfer->frlengths[x]; 3014 } 3015 3016 /* 3017 * Frames that were not transferred get zero actual length in 3018 * case the USB device driver does not check the actual number 3019 * of frames transferred, "xfer->aframes": 3020 */ 3021 for (; x < xfer->nframes; x++) { 3022 usbd_xfer_set_frame_len(xfer, x, 0); 3023 } 3024 3025 /* check actual length */ 3026 if (xfer->actlen > xfer->sumlen) { 3027 if (xfer->error == 0) { 3028 panic("%s: actual length, %d, is greater than " 3029 "initial length, %d\n", 3030 __FUNCTION__, xfer->actlen, xfer->sumlen); 3031 } else { 3032 /* just set some valid value */ 3033 xfer->actlen = xfer->sumlen; 3034 } 3035 } 3036 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n", 3037 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen, 3038 xfer->aframes, xfer->nframes); 3039 3040 if (xfer->error) { 3041 /* end of control transfer, if any */ 3042 xfer->flags_int.control_act = 0; 3043 3044 #if USB_HAVE_TT_SUPPORT 3045 switch (xfer->error) { 3046 case USB_ERR_NORMAL_COMPLETION: 3047 case USB_ERR_SHORT_XFER: 3048 case USB_ERR_STALLED: 3049 case USB_ERR_CANCELLED: 3050 /* nothing to do */ 3051 break; 3052 default: 3053 /* try to reset the TT, if any */ 3054 USB_BUS_LOCK(bus); 3055 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint); 3056 USB_BUS_UNLOCK(bus); 3057 break; 3058 } 3059 #endif 3060 /* check if we should block the execution queue */ 3061 if ((xfer->error != USB_ERR_CANCELLED) && 3062 (xfer->flags.pipe_bof)) { 3063 DPRINTFN(2, "xfer=%p: Block On Failure " 3064 "on endpoint=%p\n", xfer, xfer->endpoint); 3065 goto done; 3066 } 3067 } else { 3068 /* check for short transfers */ 3069 if (xfer->actlen < xfer->sumlen) { 3070 3071 /* end of control transfer, if any */ 3072 xfer->flags_int.control_act = 0; 3073 3074 if (!xfer->flags_int.short_xfer_ok) { 3075 xfer->error = USB_ERR_SHORT_XFER; 3076 if (xfer->flags.pipe_bof) { 3077 DPRINTFN(2, "xfer=%p: Block On Failure on " 3078 "Short Transfer on endpoint 
%p.\n", 3079 xfer, xfer->endpoint); 3080 goto done; 3081 } 3082 } 3083 } else { 3084 /* 3085 * Check if we are in the middle of a 3086 * control transfer: 3087 */ 3088 if (xfer->flags_int.control_act) { 3089 DPRINTFN(5, "xfer=%p: Control transfer " 3090 "active on endpoint=%p\n", xfer, xfer->endpoint); 3091 goto done; 3092 } 3093 } 3094 } 3095 3096 ep = xfer->endpoint; 3097 3098 /* 3099 * If the current USB transfer is completing we need to start the 3100 * next one: 3101 */ 3102 USB_BUS_LOCK(bus); 3103 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 3104 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL); 3105 3106 if (ep->endpoint_q[xfer->stream_id].curr != NULL || 3107 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) { 3108 /* there is another USB transfer waiting */ 3109 } else { 3110 /* this is the last USB transfer */ 3111 /* clear isochronous sync flag */ 3112 xfer->endpoint->is_synced = 0; 3113 } 3114 } 3115 USB_BUS_UNLOCK(bus); 3116 done: 3117 return (0); 3118 } 3119 3120 /*------------------------------------------------------------------------* 3121 * usb_command_wrapper 3122 * 3123 * This function is used to execute commands non-recursivly on an USB 3124 * transfer. 3125 *------------------------------------------------------------------------*/ 3126 void 3127 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer) 3128 { 3129 if (xfer) { 3130 /* 3131 * If the transfer is not already processing, 3132 * queue it! 3133 */ 3134 if (pq->curr != xfer) { 3135 usbd_transfer_enqueue(pq, xfer); 3136 if (pq->curr != NULL) { 3137 /* something is already processing */ 3138 DPRINTFN(6, "busy %p\n", pq->curr); 3139 return; 3140 } 3141 } 3142 } else { 3143 /* Get next element in queue */ 3144 pq->curr = NULL; 3145 } 3146 3147 if (!pq->recurse_1) { 3148 3149 /* clear third recurse flag */ 3150 pq->recurse_3 = 0; 3151 3152 do { 3153 /* set two first recurse flags */ 3154 pq->recurse_1 = 1; 3155 pq->recurse_2 = 1; 3156 3157 if (pq->curr == NULL) { 3158 xfer = TAILQ_FIRST(&pq->head); 3159 if (xfer) { 3160 TAILQ_REMOVE(&pq->head, xfer, 3161 wait_entry); 3162 xfer->wait_queue = NULL; 3163 pq->curr = xfer; 3164 } else { 3165 break; 3166 } 3167 } 3168 DPRINTFN(6, "cb %p (enter)\n", pq->curr); 3169 (pq->command) (pq); 3170 DPRINTFN(6, "cb %p (leave)\n", pq->curr); 3171 3172 /* 3173 * Set third recurse flag to indicate 3174 * recursion happened: 3175 */ 3176 pq->recurse_3 = 1; 3177 3178 } while (!pq->recurse_2); 3179 3180 /* clear first recurse flag */ 3181 pq->recurse_1 = 0; 3182 3183 } else { 3184 /* clear second recurse flag */ 3185 pq->recurse_2 = 0; 3186 } 3187 } 3188 3189 /*------------------------------------------------------------------------* 3190 * usbd_ctrl_transfer_setup 3191 * 3192 * This function is used to setup the default USB control endpoint 3193 * transfer. 
3194 *------------------------------------------------------------------------*/
3195 void
3196 usbd_ctrl_transfer_setup(struct usb_device *udev)
3197 {
3198 struct usb_xfer *xfer;
3199 uint8_t no_resetup;
3200 uint8_t iface_index;
3201
3202 /* check for root HUB */
3203 if (udev->parent_hub == NULL)
3204 return;
3205 repeat:
3206
3207 xfer = udev->ctrl_xfer[0];
3208 if (xfer) {
3209 USB_XFER_LOCK(xfer);
3210 no_resetup =
3211 ((xfer->address == udev->address) &&
3212 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3213 udev->ddesc.bMaxPacketSize));
3214 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3215 if (no_resetup) {
3216 /*
3217 * NOTE: checking "xfer->address" and
3218 * starting the USB transfer must be
3219 * atomic!
3220 */
3221 usbd_transfer_start(xfer);
3222 }
3223 }
3224 USB_XFER_UNLOCK(xfer);
3225 } else {
3226 no_resetup = 0;
3227 }
3228
3229 if (no_resetup) {
3230 /*
3231 * All parameters are exactly the same as before.
3232 * Just return.
3233 */
3234 return;
3235 }
3236 /*
3237 * Update wMaxPacketSize for the default control endpoint:
3238 */
3239 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3240 udev->ddesc.bMaxPacketSize;
3241
3242 /*
3243 * Unsetup any existing USB transfer:
3244 */
3245 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3246
3247 /*
3248 * Reset clear stall error counter.
3249 */
3250 udev->clear_stall_errors = 0;
3251
3252 /*
3253 * Try to setup a new USB transfer for the
3254 * default control endpoint:
3255 */
3256 iface_index = 0;
3257 if (usbd_transfer_setup(udev, &iface_index,
3258 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3259 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3260 &udev->device_mtx)) {
3261 DPRINTFN(0, "could not setup default "
3262 "USB transfer\n");
3263 } else {
3264 goto repeat;
3265 }
3266 }
3267
3268 /*------------------------------------------------------------------------*
3269 * usbd_clear_stall_locked - factored out code
3270 *
3271 * NOTE: the intention of this function is not to reset the hardware
3272 * data toggle.
3273 *------------------------------------------------------------------------*/
3274 void
3275 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3276 {
3277 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3278
3279 /* check that we have a valid case */
3280 if (udev->flags.usb_mode == USB_MODE_HOST &&
3281 udev->parent_hub != NULL &&
3282 udev->bus->methods->clear_stall != NULL &&
3283 ep->methods != NULL) {
3284 (udev->bus->methods->clear_stall) (udev, ep);
3285 }
3286 }
3287
3288 /*------------------------------------------------------------------------*
3289 * usbd_clear_data_toggle - factored out code
3290 *
3291 * NOTE: the intention of this function is not to reset the hardware
3292 * data toggle on the USB device side.
3293 *------------------------------------------------------------------------*/
3294 void
3295 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3296 {
3297 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3298
3299 USB_BUS_LOCK(udev->bus);
3300 ep->toggle_next = 0;
3301 /* some hardware needs a callback to clear the data toggle */
3302 usbd_clear_stall_locked(udev, ep);
3303 USB_BUS_UNLOCK(udev->bus);
3304 }
3305
3306 /*------------------------------------------------------------------------*
3307 * usbd_clear_stall_callback - factored out clear stall callback
3308 *
3309 * Input parameters:
3310 * xfer1: Clear Stall Control Transfer
3311 * xfer2: Stalled USB Transfer
3312 *
3313 * This function is NULL safe.
3314 *
3315 * Return values:
3316 * 0: In progress
3317 * Else: Finished
3318 *
3319 * Clear stall config example:
3320 *
3321 * static const struct usb_config my_clearstall = {
3322 * .type = UE_CONTROL,
3323 * .endpoint = 0,
3324 * .direction = UE_DIR_ANY,
3325 * .interval = 50, //50 milliseconds
3326 * .bufsize = sizeof(struct usb_device_request),
3327 * .timeout = 1000, //1.000 seconds
3328 * .callback = &my_clear_stall_callback, // **
3329 * .usb_mode = USB_MODE_HOST,
3330 * };
3331 *
3332 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3333 * passing the correct parameters.
3334 *------------------------------------------------------------------------*/
3335 uint8_t
3336 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3337 struct usb_xfer *xfer2)
3338 {
3339 struct usb_device_request req;
3340
3341 if (xfer2 == NULL) {
3342 /* looks like we are tearing down */
3343 DPRINTF("NULL input parameter\n");
3344 return (0);
3345 }
3346 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3347 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3348
3349 switch (USB_GET_STATE(xfer1)) {
3350 case USB_ST_SETUP:
3351
3352 /*
3353 * pre-clear the data toggle to DATA0 ("umass.c" and
3354 * "ata-usb.c" depend on this)
3355 */
3356
3357 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3358
3359 /* setup a clear-stall packet */
3360
3361 req.bmRequestType = UT_WRITE_ENDPOINT;
3362 req.bRequest = UR_CLEAR_FEATURE;
3363 USETW(req.wValue, UF_ENDPOINT_HALT);
3364 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3365 req.wIndex[1] = 0;
3366 USETW(req.wLength, 0);
3367
3368 /*
3369 * "usbd_transfer_setup_sub()" will ensure that
3370 * we have sufficient room in the buffer for
3371 * the request structure!
3372 */
3373
3374 /* copy in the transfer */
3375
3376 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3377
3378 /* set length */
3379 xfer1->frlengths[0] = sizeof(req);
3380 xfer1->nframes = 1;
3381
3382 usbd_transfer_submit(xfer1);
3383 return (0);
3384
3385 case USB_ST_TRANSFERRED:
3386 break;
3387
3388 default: /* Error */
3389 if (xfer1->error == USB_ERR_CANCELLED) {
3390 return (0);
3391 }
3392 break;
3393 }
3394 return (1); /* Clear Stall Finished */
3395 }
3396
3397 /*------------------------------------------------------------------------*
3398 * usbd_transfer_poll
3399 *
3400 * The following function gets called from the USB keyboard driver and
3401 * UMASS when the system has panicked.
3402 *
3403 * NOTE: It is currently not possible to resume normal operation on
3404 * the USB controller which has been polled, due to clearing of the
3405 * "up_dsleep" and "up_msleep" flags.
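 *
 * Usage sketch (hypothetical, not taken from this file): a driver that
 * must keep transfers going after a panic can poll its whole transfer
 * array, where "N_TRANSFER" is the hypothetical array size:
 *
 *     usbd_transfer_poll(sc->sc_xfer, N_TRANSFER);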
3406 *------------------------------------------------------------------------*/ 3407 void 3408 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max) 3409 { 3410 struct usb_xfer *xfer; 3411 struct usb_xfer_root *xroot; 3412 struct usb_device *udev; 3413 struct usb_proc_msg *pm; 3414 struct usb_bus *bus; 3415 uint16_t n; 3416 uint16_t drop_bus_spin; 3417 uint16_t drop_bus; 3418 uint16_t drop_xfer; 3419 3420 for (n = 0; n != max; n++) { 3421 /* Extra checks to avoid panic */ 3422 xfer = ppxfer[n]; 3423 if (xfer == NULL) 3424 continue; /* no USB transfer */ 3425 xroot = xfer->xroot; 3426 if (xroot == NULL) 3427 continue; /* no USB root */ 3428 udev = xroot->udev; 3429 if (udev == NULL) 3430 continue; /* no USB device */ 3431 bus = udev->bus; 3432 if (bus == NULL) 3433 continue; /* no BUS structure */ 3434 if (bus->methods == NULL) 3435 continue; /* no BUS methods */ 3436 if (bus->methods->xfer_poll == NULL) 3437 continue; /* no poll method */ 3438 3439 drop_bus_spin = 0; 3440 drop_bus = 0; 3441 drop_xfer = 0; 3442 3443 if (USB_IN_POLLING_MODE_FUNC() == 0) { 3444 /* make sure that the BUS spin mutex is not locked */ 3445 while (mtx_owned(&bus->bus_spin_lock)) { 3446 mtx_unlock_spin(&bus->bus_spin_lock); 3447 drop_bus_spin++; 3448 } 3449 3450 /* make sure that the BUS mutex is not locked */ 3451 while (mtx_owned(&bus->bus_mtx)) { 3452 mtx_unlock(&bus->bus_mtx); 3453 drop_bus++; 3454 } 3455 3456 /* make sure that the transfer mutex is not locked */ 3457 while (mtx_owned(xroot->xfer_mtx)) { 3458 mtx_unlock(xroot->xfer_mtx); 3459 drop_xfer++; 3460 } 3461 } 3462 3463 /* Make sure cv_signal() and cv_broadcast() is not called */ 3464 USB_BUS_CONTROL_XFER_PROC(bus)->up_msleep = 0; 3465 USB_BUS_EXPLORE_PROC(bus)->up_msleep = 0; 3466 USB_BUS_GIANT_PROC(bus)->up_msleep = 0; 3467 USB_BUS_NON_GIANT_ISOC_PROC(bus)->up_msleep = 0; 3468 USB_BUS_NON_GIANT_BULK_PROC(bus)->up_msleep = 0; 3469 3470 /* poll USB hardware */ 3471 (bus->methods->xfer_poll) (bus); 3472 3473 USB_BUS_LOCK(xroot->bus); 3474 3475 /* check for clear stall */ 3476 if (udev->ctrl_xfer[1] != NULL) { 3477 3478 /* poll clear stall start */ 3479 pm = &udev->cs_msg[0].hdr; 3480 (pm->pm_callback) (pm); 3481 /* poll clear stall done thread */ 3482 pm = &udev->ctrl_xfer[1]-> 3483 xroot->done_m[0].hdr; 3484 (pm->pm_callback) (pm); 3485 } 3486 3487 /* poll done thread */ 3488 pm = &xroot->done_m[0].hdr; 3489 (pm->pm_callback) (pm); 3490 3491 USB_BUS_UNLOCK(xroot->bus); 3492 3493 /* restore transfer mutex */ 3494 while (drop_xfer--) 3495 mtx_lock(xroot->xfer_mtx); 3496 3497 /* restore BUS mutex */ 3498 while (drop_bus--) 3499 mtx_lock(&bus->bus_mtx); 3500 3501 /* restore BUS spin mutex */ 3502 while (drop_bus_spin--) 3503 mtx_lock_spin(&bus->bus_spin_lock); 3504 } 3505 } 3506 3507 static void 3508 usbd_get_std_packet_size(struct usb_std_packet_size *ptr, 3509 uint8_t type, enum usb_dev_speed speed) 3510 { 3511 static const uint16_t intr_range_max[USB_SPEED_MAX] = { 3512 [USB_SPEED_LOW] = 8, 3513 [USB_SPEED_FULL] = 64, 3514 [USB_SPEED_HIGH] = 1024, 3515 [USB_SPEED_VARIABLE] = 1024, 3516 [USB_SPEED_SUPER] = 1024, 3517 }; 3518 3519 static const uint16_t isoc_range_max[USB_SPEED_MAX] = { 3520 [USB_SPEED_LOW] = 0, /* invalid */ 3521 [USB_SPEED_FULL] = 1023, 3522 [USB_SPEED_HIGH] = 1024, 3523 [USB_SPEED_VARIABLE] = 3584, 3524 [USB_SPEED_SUPER] = 1024, 3525 }; 3526 3527 static const uint16_t control_min[USB_SPEED_MAX] = { 3528 [USB_SPEED_LOW] = 8, 3529 [USB_SPEED_FULL] = 8, 3530 [USB_SPEED_HIGH] = 64, 3531 [USB_SPEED_VARIABLE] = 512, 3532 
[USB_SPEED_SUPER] = 512, 3533 }; 3534 3535 static const uint16_t bulk_min[USB_SPEED_MAX] = { 3536 [USB_SPEED_LOW] = 8, 3537 [USB_SPEED_FULL] = 8, 3538 [USB_SPEED_HIGH] = 512, 3539 [USB_SPEED_VARIABLE] = 512, 3540 [USB_SPEED_SUPER] = 1024, 3541 }; 3542 3543 uint16_t temp; 3544 3545 memset(ptr, 0, sizeof(*ptr)); 3546 3547 switch (type) { 3548 case UE_INTERRUPT: 3549 ptr->range.max = intr_range_max[speed]; 3550 break; 3551 case UE_ISOCHRONOUS: 3552 ptr->range.max = isoc_range_max[speed]; 3553 break; 3554 default: 3555 if (type == UE_BULK) 3556 temp = bulk_min[speed]; 3557 else /* UE_CONTROL */ 3558 temp = control_min[speed]; 3559 3560 /* default is fixed */ 3561 ptr->fixed[0] = temp; 3562 ptr->fixed[1] = temp; 3563 ptr->fixed[2] = temp; 3564 ptr->fixed[3] = temp; 3565 3566 if (speed == USB_SPEED_FULL) { 3567 /* multiple sizes */ 3568 ptr->fixed[1] = 16; 3569 ptr->fixed[2] = 32; 3570 ptr->fixed[3] = 64; 3571 } 3572 if ((speed == USB_SPEED_VARIABLE) && 3573 (type == UE_BULK)) { 3574 /* multiple sizes */ 3575 ptr->fixed[2] = 1024; 3576 ptr->fixed[3] = 1536; 3577 } 3578 break; 3579 } 3580 } 3581 3582 void * 3583 usbd_xfer_softc(struct usb_xfer *xfer) 3584 { 3585 return (xfer->priv_sc); 3586 } 3587 3588 void * 3589 usbd_xfer_get_priv(struct usb_xfer *xfer) 3590 { 3591 return (xfer->priv_fifo); 3592 } 3593 3594 void 3595 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr) 3596 { 3597 xfer->priv_fifo = ptr; 3598 } 3599 3600 uint8_t 3601 usbd_xfer_state(struct usb_xfer *xfer) 3602 { 3603 return (xfer->usb_state); 3604 } 3605 3606 void 3607 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag) 3608 { 3609 switch (flag) { 3610 case USB_FORCE_SHORT_XFER: 3611 xfer->flags.force_short_xfer = 1; 3612 break; 3613 case USB_SHORT_XFER_OK: 3614 xfer->flags.short_xfer_ok = 1; 3615 break; 3616 case USB_MULTI_SHORT_OK: 3617 xfer->flags.short_frames_ok = 1; 3618 break; 3619 case USB_MANUAL_STATUS: 3620 xfer->flags.manual_status = 1; 3621 break; 3622 } 3623 } 3624 3625 void 3626 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag) 3627 { 3628 switch (flag) { 3629 case USB_FORCE_SHORT_XFER: 3630 xfer->flags.force_short_xfer = 0; 3631 break; 3632 case USB_SHORT_XFER_OK: 3633 xfer->flags.short_xfer_ok = 0; 3634 break; 3635 case USB_MULTI_SHORT_OK: 3636 xfer->flags.short_frames_ok = 0; 3637 break; 3638 case USB_MANUAL_STATUS: 3639 xfer->flags.manual_status = 0; 3640 break; 3641 } 3642 } 3643 3644 /* 3645 * The following function returns in milliseconds when the isochronous 3646 * transfer was completed by the hardware. The returned value wraps 3647 * around 65536 milliseconds. 3648 */ 3649 uint16_t 3650 usbd_xfer_get_timestamp(struct usb_xfer *xfer) 3651 { 3652 return (xfer->isoc_time_complete); 3653 } 3654 3655 /* 3656 * The following function returns non-zero if the max packet size 3657 * field was clamped to a valid value. Else it returns zero. 3658 */ 3659 uint8_t 3660 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer) 3661 { 3662 return (xfer->flags_int.maxp_was_clamped); 3663 } 3664
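/*------------------------------------------------------------------------*
 * Example: typical transfer callback using the accessors above
 *
 * The following sketch is illustrative only and is not part of this
 * file's API; the callback name, softc and transfer names are
 * hypothetical. It shows the usual USB_ST_SETUP / USB_ST_TRANSFERRED
 * pattern built on "usbd_xfer_status()", "usbd_xfer_frame_data()",
 * "usbd_xfer_set_frame_len()" and "usbd_transfer_submit()":
 *
 *     static void
 *     xxx_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *     {
 *         void *buf;
 *         int actlen;
 *         int len;
 *
 *         usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *         switch (USB_GET_STATE(xfer)) {
 *         case USB_ST_TRANSFERRED:
 *             usbd_xfer_frame_data(xfer, 0, &buf, &len);
 *             // hand "actlen" bytes starting at "buf" to the driver
 *             // FALLTHROUGH
 *         case USB_ST_SETUP:
 *     tr_setup:
 *             usbd_xfer_set_frame_len(xfer, 0,
 *                 usbd_xfer_max_len(xfer));
 *             usbd_transfer_submit(xfer);
 *             break;
 *         default:    // error
 *             if (error != USB_ERR_CANCELLED) {
 *                 // request a stall clear and retry
 *                 usbd_xfer_set_stall(xfer);
 *                 goto tr_setup;
 *             }
 *             break;
 *         }
 *     }
 *------------------------------------------------------------------------*/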